This commit is contained in:
2025-09-07 22:09:54 +02:00
parent e1b817252c
commit 2fc0d000b6
7796 changed files with 2159515 additions and 933 deletions

View File

@ -0,0 +1,127 @@
from pandas import (
api as api,
arrays as arrays,
errors as errors,
io as io,
plotting as plotting,
testing as testing,
tseries as tseries,
util as util,
)
from pandas.core.api import (
NA as NA,
ArrowDtype as ArrowDtype,
BooleanDtype as BooleanDtype,
Categorical as Categorical,
CategoricalDtype as CategoricalDtype,
CategoricalIndex as CategoricalIndex,
DataFrame as DataFrame,
DateOffset as DateOffset,
DatetimeIndex as DatetimeIndex,
DatetimeTZDtype as DatetimeTZDtype,
Float32Dtype as Float32Dtype,
Float64Dtype as Float64Dtype,
Grouper as Grouper,
Index as Index,
IndexSlice as IndexSlice,
Int8Dtype as Int8Dtype,
Int16Dtype as Int16Dtype,
Int32Dtype as Int32Dtype,
Int64Dtype as Int64Dtype,
Interval as Interval,
IntervalDtype as IntervalDtype,
IntervalIndex as IntervalIndex,
MultiIndex as MultiIndex,
NamedAgg as NamedAgg,
NaT as NaT,
Period as Period,
PeriodDtype as PeriodDtype,
PeriodIndex as PeriodIndex,
RangeIndex as RangeIndex,
Series as Series,
StringDtype as StringDtype,
Timedelta as Timedelta,
TimedeltaIndex as TimedeltaIndex,
Timestamp as Timestamp,
UInt8Dtype as UInt8Dtype,
UInt16Dtype as UInt16Dtype,
UInt32Dtype as UInt32Dtype,
UInt64Dtype as UInt64Dtype,
array as array,
bdate_range as bdate_range,
date_range as date_range,
factorize as factorize,
interval_range as interval_range,
isna as isna,
isnull as isnull,
notna as notna,
notnull as notnull,
period_range as period_range,
set_eng_float_format as set_eng_float_format,
timedelta_range as timedelta_range,
to_datetime as to_datetime,
to_numeric as to_numeric,
to_timedelta as to_timedelta,
unique as unique,
value_counts as value_counts,
)
from pandas.core.arrays.sparse import SparseDtype as SparseDtype
from pandas.core.computation.api import eval as eval
from pandas.core.reshape.api import (
concat as concat,
crosstab as crosstab,
cut as cut,
from_dummies as from_dummies,
get_dummies as get_dummies,
lreshape as lreshape,
melt as melt,
merge as merge,
merge_asof as merge_asof,
merge_ordered as merge_ordered,
pivot as pivot,
pivot_table as pivot_table,
qcut as qcut,
wide_to_long as wide_to_long,
)
from pandas._config import (
describe_option as describe_option,
get_option as get_option,
option_context as option_context,
options as options,
reset_option as reset_option,
set_option as set_option,
)
from pandas.util._print_versions import show_versions as show_versions
from pandas.util._tester import test as test
from pandas.io.api import (
ExcelFile as ExcelFile,
ExcelWriter as ExcelWriter,
HDFStore as HDFStore,
read_clipboard as read_clipboard,
read_csv as read_csv,
read_excel as read_excel,
read_feather as read_feather,
read_fwf as read_fwf,
read_hdf as read_hdf,
read_html as read_html,
read_json as read_json,
read_orc as read_orc,
read_parquet as read_parquet,
read_pickle as read_pickle,
read_sas as read_sas,
read_spss as read_spss,
read_sql as read_sql,
read_sql_query as read_sql_query,
read_sql_table as read_sql_table,
read_stata as read_stata,
read_table as read_table,
read_xml as read_xml,
)
from pandas.io.json._normalize import json_normalize as json_normalize
from pandas.tseries import offsets as offsets
from pandas.tseries.api import infer_freq as infer_freq
# Version string of the installed pandas distribution (PEP 440 format).
__version__: str

View File

@ -0,0 +1,8 @@
from pandas._config.config import (
describe_option as describe_option,
get_option as get_option,
option_context as option_context,
options as options,
reset_option as reset_option,
set_option as set_option,
)

View File

@ -0,0 +1,184 @@
from collections.abc import (
Callable,
Iterable,
)
from contextlib import ContextDecorator
from typing import (
Any,
Literal,
overload,
)
# Return the current value of the single option whose name matches ``pat``.
def get_option(pat: str) -> Any: ...

# Set the single option matching ``pat`` to ``val``.
def set_option(pat: str, val: object) -> None: ...

# Reset the option(s) matching ``pat`` to their default value.
def reset_option(pat: str) -> None: ...

# With ``_print_desc=False`` the description is returned as a string;
# by default it is printed and the call returns None.
@overload
def describe_option(pat: str, _print_desc: Literal[False]) -> str: ...
@overload
def describe_option(pat: str, _print_desc: Literal[True] = ...) -> None: ...
# Attribute-style view over a nested options dictionary: attribute access on
# ``key`` is forwarded to the option named ``<prefix>.<key>`` in ``d``.
class DictWrapper:
    def __init__(self, d: dict[str, Any], prefix: str = ...) -> None: ...
    # Leaf options are scalars; sub-groups are nested DictWrapper instances.
    def __setattr__(
        self, key: str, val: str | bool | int | DictWrapper | None
    ) -> None: ...
    def __getattr__(self, key: str) -> str | bool | int | DictWrapper | None: ...
    def __dir__(self) -> Iterable[str]: ...
# Option group ``compute.*``.
class Compute(DictWrapper):
    use_bottleneck: bool
    use_numba: bool
    use_numexpr: bool

# Option group ``display.html.*``.
class DisplayHTML(DictWrapper):
    border: int
    table_schema: bool
    use_mathjax: bool

# Option group ``display.latex.*``.
class DisplayLaTeX(DictWrapper):
    escape: bool
    longtable: bool
    multicolumn: bool
    multicolumn_format: str
    multirow: bool
    repr: bool

# Option group ``display.unicode.*``.
class DisplayUnicode(DictWrapper):
    ambiguous_as_wide: bool
    east_asian_width: bool
# Option group ``display.*``; nested groups are exposed as typed attributes
# (``html``, ``latex``, ``unicode``).
class Display(DictWrapper):
    chop_threshold: float | None
    colheader_justify: Literal["left", "right"]
    date_dayfirst: bool
    date_yearfirst: bool
    encoding: str
    expand_frame_repr: bool
    float_format: Callable[[float], str] | None
    html: DisplayHTML
    large_repr: Literal["truncate", "info"]
    latex: DisplayLaTeX
    max_categories: int
    max_columns: int | None
    max_colwidth: int | None
    max_dir_items: int | None
    max_info_columns: int
    max_info_rows: int
    max_rows: int | None
    max_seq_items: int | None
    memory_usage: bool | Literal["deep"] | None
    min_rows: int | None
    multi_sparse: bool
    notebook_repr_html: bool
    pprint_nest_depth: int
    precision: int
    show_dimensions: bool | Literal["truncate"]
    unicode: DisplayUnicode
    width: int
# Per-format sub-groups of ``io.excel.*``: each names the engine used for
# reading and/or writing that Excel file format.
class IOExcelODS(DictWrapper):
    reader: str
    writer: str

class IOExcelXLS(DictWrapper):
    writer: str

class IOExcelXLSB(DictWrapper):
    reader: str

class IOExcelXLSM(DictWrapper):
    reader: str
    writer: str

class IOExcelXLSX(DictWrapper):
    reader: str
    writer: str
# Option group ``io.excel``; one typed sub-group per Excel file format.
class IOExcel(DictWrapper):
    ods: IOExcelODS
    # Fix: these were typed as the generic ``DictWrapper``, discarding the
    # dedicated per-format classes declared above (only ``ods`` used its
    # specific type).  Use the format-specific classes so ``reader``/``writer``
    # attributes type-check, consistent with ``ods``.
    xls: IOExcelXLS
    xlsb: IOExcelXLSB
    xlsm: IOExcelXLSM
    xlsx: IOExcelXLSX
# Option group ``io.hdf.*``.
class IOHDF(DictWrapper):
    default_format: Literal["table", "fixed"] | None
    dropna_table: bool

# Option group ``io.parquet.*``.
class IOParquet(DictWrapper):
    engine: str

# Option group ``io.sql.*``.
class IOSQL(DictWrapper):
    engine: str

# Option group ``io.*`` aggregating the typed sub-groups above.
class IO(DictWrapper):
    excel: IOExcel
    hdf: IOHDF
    parquet: IOParquet
    sql: IOSQL
# Option group ``mode.*``.
class Mode(DictWrapper):
    chained_assignment: Literal["warn", "raise"] | None
    data_manager: str
    sim_interactive: bool
    string_storage: str
    use_inf_as_na: bool

# Option group ``plotting.matplotlib.*``.
class PlottingMatplotlib(DictWrapper):
    register_converters: str

# Option group ``plotting.*``.
class Plotting(DictWrapper):
    backend: str
    matplotlib: PlottingMatplotlib
# Sub-groups of ``styler.*`` options.
# NOTE(review): unlike the other option groups in this file, the Styler*
# sub-group classes do not inherit DictWrapper — confirm this is intentional.
class StylerFormat:
    decimal: str
    escape: str | None
    formatter: str | None
    na_rep: str | None
    precision: int
    thousands: str | None

class StylerHTML:
    mathjax: bool

class StylerLatex:
    environment: str | None
    hrules: bool
    multicol_align: str
    multirow_align: str

class StylerRender:
    encoding: str
    max_columns: int | None
    max_elements: int
    max_rows: int | None
    repr: str

class StylerSparse:
    columns: bool
    index: bool

# Option group ``styler.*``.
class Styler(DictWrapper):
    format: StylerFormat
    html: StylerHTML
    latex: StylerLatex
    render: StylerRender
    sparse: StylerSparse
# Root of the options tree, exposed to users as ``pd.options``.
class Options(DictWrapper):
    compute: Compute
    display: Display
    io: IO
    mode: Mode
    plotting: Plotting
    styler: Styler

# Module-level singleton: ``pandas.options``.
options: Options
# Context manager / decorator that temporarily sets (pat, val) option pairs;
# extra pairs are passed positionally via ``*args``.
class option_context(ContextDecorator):
    def __init__(self, /, pat: str, val: Any, *args: Any) -> None: ...
    def __enter__(self) -> None: ...
    def __exit__(self, *args: object) -> None: ...

# Raised for unknown or ambiguous option names; subclasses both
# AttributeError and KeyError for backward compatibility.
class OptionError(AttributeError, KeyError): ...

View File

@ -0,0 +1,10 @@
from pandas._libs.interval import Interval as Interval
from pandas._libs.tslibs import (
NaT as NaT,
NaTType as NaTType,
OutOfBoundsDatetime as OutOfBoundsDatetime,
Period as Period,
Timedelta as Timedelta,
Timestamp as Timestamp,
iNaT as iNaT,
)

View File

@ -0,0 +1,4 @@
# C-implemented base for DataFrame/Series indexers (``.loc``, ``.iloc``);
# ``obj`` is the indexed NDFrame (typed ``object`` here to avoid a cycle).
class _NDFrameIndexerBase:
    def __init__(self, name: str, obj: object) -> None: ...
    @property
    def ndim(self) -> int: ...

View File

@ -0,0 +1,247 @@
from typing import (
Any,
Generic,
Literal,
TypeVar,
overload,
)
import numpy as np
from pandas import (
IntervalIndex,
Series,
Timedelta,
Timestamp,
)
from pandas.core.series import (
TimedeltaSeries,
TimestampSeries,
)
from pandas._typing import (
IntervalClosedType,
IntervalT,
np_1darray,
npt,
)
# The set of accepted values for the ``closed`` argument.
VALID_CLOSED: frozenset[str]
# Orderable endpoint types: plain numbers, time-like types, and their union.
_OrderableScalarT = TypeVar("_OrderableScalarT", bound=int | float)
_OrderableTimesT = TypeVar("_OrderableTimesT", bound=Timestamp | Timedelta)
_OrderableT = TypeVar("_OrderableT", bound=int | float | Timestamp | Timedelta)
# Descriptor typing ``Interval.length`` / ``IntervalTree.length``: numeric
# intervals yield their scalar type, time-like intervals yield a Timedelta.
class _LengthDescriptor:
    @overload
    def __get__(
        self, instance: Interval[_OrderableScalarT], owner: Any
    ) -> _OrderableScalarT: ...
    @overload
    def __get__(
        self, instance: Interval[_OrderableTimesT], owner: Any
    ) -> Timedelta: ...
    @overload
    def __get__(self, instance: IntervalTree, owner: Any) -> np.ndarray: ...
# Descriptor typing ``Interval.mid``: numeric intervals yield float,
# time-like intervals yield their own endpoint type.
class _MidDescriptor:
    @overload
    def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ...
    @overload
    def __get__(
        self, instance: Interval[_OrderableTimesT], owner: Any
    ) -> _OrderableTimesT: ...
    @overload
    def __get__(self, instance: IntervalTree, owner: Any) -> np.ndarray: ...
# Shared closed/open-side predicates for Interval and IntervalTree.
class IntervalMixin:
    @property
    def closed_left(self) -> bool: ...
    @property
    def closed_right(self) -> bool: ...
    @property
    def open_left(self) -> bool: ...
    @property
    def open_right(self) -> bool: ...
    @property
    def is_empty(self) -> bool: ...
# Immutable interval, parameterized by its orderable endpoint type.
# Arithmetic overloads shift/scale both endpoints; comparison overloads
# return bool for scalars and array-like results for Index/Series operands.
class Interval(IntervalMixin, Generic[_OrderableT]):
    @property
    def left(self: Interval[_OrderableT]) -> _OrderableT: ...
    @property
    def right(self: Interval[_OrderableT]) -> _OrderableT: ...
    @property
    def closed(self) -> IntervalClosedType: ...
    mid: _MidDescriptor
    length: _LengthDescriptor
    def __init__(
        self,
        left: _OrderableT,
        right: _OrderableT,
        closed: IntervalClosedType = ...,
    ) -> None: ...
    def __hash__(self) -> int: ...
    # for __contains__, it seems that we have to separate out the 4 cases to make
    # mypy happy
    @overload
    def __contains__(self: Interval[Timestamp], key: Timestamp) -> bool: ...
    @overload
    def __contains__(self: Interval[Timedelta], key: Timedelta) -> bool: ...
    @overload
    def __contains__(self: Interval[int], key: float) -> bool: ...
    @overload
    def __contains__(self: Interval[float], key: float) -> bool: ...
    @overload
    def __add__(self: Interval[Timestamp], y: Timedelta) -> Interval[Timestamp]: ...
    @overload
    def __add__(self: Interval[Timedelta], y: Timedelta) -> Interval[Timedelta]: ...
    @overload
    def __add__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __add__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __radd__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __radd__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __radd__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __sub__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __sub__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __sub__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __rsub__(
        self: Interval[_OrderableTimesT], y: Timedelta
    ) -> Interval[_OrderableTimesT]: ...
    @overload
    def __rsub__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __rsub__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __mul__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __mul__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __mul__(self: Interval[Timedelta], y: float) -> Interval[Timedelta]: ...
    @overload
    def __rmul__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __rmul__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __rmul__(self: Interval[Timedelta], y: float) -> Interval[Timedelta]: ...
    @overload
    def __truediv__(self: Interval[int], y: float) -> Interval[float]: ...
    @overload
    def __truediv__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __truediv__(self: Interval[Timedelta], y: float) -> Interval[Timedelta]: ...
    @overload
    def __floordiv__(
        self: Interval[int], y: _OrderableScalarT
    ) -> Interval[_OrderableScalarT]: ...
    @overload
    def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ...
    @overload
    def __floordiv__(self: Interval[Timedelta], y: float) -> Interval[Timedelta]: ...
    @overload
    def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ...
    @overload
    def overlaps(self: Interval[int], other: Interval[float]) -> bool: ...
    @overload
    def overlaps(self: Interval[float], other: Interval[int]) -> bool: ...
    @overload
    def __gt__(self, other: Interval[_OrderableT]) -> bool: ...
    @overload
    def __gt__(
        self: IntervalT, other: IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __gt__(
        self,
        other: Series[int] | Series[float] | TimestampSeries | TimedeltaSeries,
    ) -> Series[bool]: ...
    @overload
    def __lt__(self, other: Interval[_OrderableT]) -> bool: ...
    @overload
    def __lt__(
        self: IntervalT, other: IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __lt__(
        self,
        other: Series[int] | Series[float] | TimestampSeries | TimedeltaSeries,
    ) -> Series[bool]: ...
    @overload
    def __ge__(self, other: Interval[_OrderableT]) -> bool: ...
    @overload
    def __ge__(
        self: IntervalT, other: IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __ge__(
        self,
        other: Series[int] | Series[float] | TimestampSeries | TimedeltaSeries,
    ) -> Series[bool]: ...
    @overload
    def __le__(self, other: Interval[_OrderableT]) -> bool: ...
    @overload
    def __le__(
        self: IntervalT, other: IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    # Fix: ``__le__`` was missing the Series overload that ``__lt__``,
    # ``__gt__`` and ``__ge__`` all declare, so ``interval <= series`` lost
    # its ``Series[bool]`` result type.
    @overload
    def __le__(
        self,
        other: Series[int] | Series[float] | TimestampSeries | TimedeltaSeries,
    ) -> Series[bool]: ...
    @overload
    def __eq__(self, other: Interval[_OrderableT]) -> bool: ... # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __eq__(
        self: IntervalT, other: IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __eq__(self, other: Series[_OrderableT]) -> Series[bool]: ... # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: object) -> Literal[False]: ...
    @overload
    def __ne__(self, other: Interval[_OrderableT]) -> bool: ... # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __ne__(
        self: IntervalT, other: IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __ne__(self, other: Series[_OrderableT]) -> Series[bool]: ... # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: object) -> Literal[True]: ...
# C-implemented interval tree over parallel left/right endpoint arrays;
# used by IntervalIndex for lookups.
class IntervalTree(IntervalMixin):
    def __init__(
        self,
        left: np.ndarray,
        right: np.ndarray,
        closed: IntervalClosedType = ...,
        leaf_size: int = ...,
    ) -> None: ...
    # Map target values to positions; -1 marks values without a unique match.
    def get_indexer(self, target) -> npt.NDArray[np.intp]: ...
    def get_indexer_non_unique(
        self, target
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
    _na_count: int
    @property
    def is_overlapping(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    def clear_mapping(self) -> None: ...

View File

@ -0,0 +1,4 @@
# Untyped pass-through stubs for the vendored ujson JSON codec
# (decode/loads parse JSON text; encode/dumps serialize to JSON text).
def decode(*args, **kwargs): ...
def dumps(*args, **kwargs): ...
def encode(*args, **kwargs): ...
def loads(*args, **kwargs): ...

View File

@ -0,0 +1,26 @@
from enum import Enum
from typing import (
Final,
Literal,
)
import numpy as np
from typing_extensions import (
TypeAlias,
TypeGuard,
)
# Sentinel distinguishing "argument omitted" from an explicit None.
class _NoDefault(Enum):
    no_default = ...

# The singleton sentinel value, and an alias for annotating parameters
# that accept it.
no_default: Final = _NoDefault.no_default
_NoDefaultDoNotUse: TypeAlias = Literal[_NoDefault.no_default]  # noqa: PYI047
# Return a string label describing the inferred dtype of ``value``.
def infer_dtype(value: object, skipna: bool = True) -> str: ...
def is_iterator(obj: object) -> bool: ...
def is_scalar(val: object) -> bool: ...
def is_list_like(obj: object, allow_sets: bool = True) -> bool: ...
# The TypeGuard returns let type checkers narrow the argument's type.
def is_complex(val: object) -> TypeGuard[complex]: ...
def is_bool(val: object) -> TypeGuard[bool | np.bool_]: ...
def is_integer(val: object) -> TypeGuard[int | np.integer]: ...
def is_float(val: object) -> TypeGuard[float | np.floating]: ...

View File

@ -0,0 +1,46 @@
from typing_extensions import Self
# Type of the ``pd.NA`` missing-value singleton.  Binary arithmetic
# propagates NA; comparisons are annotated ``bool`` here.
class NAType:
    def __new__(cls, *args, **kwargs) -> Self: ...
    def __format__(self, format_spec: str) -> str: ...
    # Annotated ``None`` so type checkers flag truth-testing NA.
    def __bool__(self) -> None: ...
    def __hash__(self) -> int: ...
    def __reduce__(self) -> str: ...
    def __add__(self, other) -> NAType: ...
    def __radd__(self, other) -> NAType: ...
    def __sub__(self, other) -> NAType: ...
    def __rsub__(self, other) -> NAType: ...
    def __mul__(self, other) -> NAType: ...
    def __rmul__(self, other) -> NAType: ...
    def __matmul__(self, other) -> NAType: ...
    def __rmatmul__(self, other) -> NAType: ...
    def __truediv__(self, other) -> NAType: ...
    def __rtruediv__(self, other) -> NAType: ...
    def __floordiv__(self, other) -> NAType: ...
    def __rfloordiv__(self, other) -> NAType: ...
    def __mod__(self, other) -> NAType: ...
    def __rmod__(self, other) -> NAType: ...
    def __divmod__(self, other) -> NAType: ...
    def __rdivmod__(self, other) -> NAType: ...
    def __eq__(self, other) -> bool: ...
    def __ne__(self, other) -> bool: ...
    def __le__(self, other) -> bool: ...
    def __lt__(self, other) -> bool: ...
    def __gt__(self, other) -> bool: ...
    def __ge__(self, other) -> bool: ...
    # Fix: the unary operators were declared with a bogus ``other``
    # parameter; the unary protocol takes only ``self``, so the old stubs
    # rejected valid ``-NA``, ``+NA``, ``abs(NA)`` and ``~NA``.
    def __neg__(self) -> NAType: ...
    def __pos__(self) -> NAType: ...
    def __abs__(self) -> NAType: ...
    def __invert__(self) -> NAType: ...
    def __pow__(self, other) -> NAType: ...
    def __rpow__(self, other) -> NAType: ...
    # NOTE(review): ``NA & False`` short-circuits to a bool in pandas —
    # confirm ``NAType | None`` (vs ``bool | NAType``) is the intended return.
    def __and__(self, other) -> NAType | None: ...
    __rand__ = __and__
    def __or__(self, other) -> bool | NAType: ...
    __ror__ = __or__
    def __xor__(self, other) -> NAType: ...
    __rxor__ = __xor__
    __array_priority__: int
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
# The missing-value singleton, ``pandas.NA``.
NA: NAType = ...

View File

@ -0,0 +1,16 @@
from collections.abc import Callable
# Descriptor backing pandas' ``cache_readonly``: computes ``func`` once per
# instance and caches the result.
class CachedProperty:
    def __init__(self, func: Callable) -> None: ...
    def __get__(self, obj, typ): ...
    def __set__(self, obj, value) -> None: ...

# note: this is a lie to make type checkers happy (they special
# case property). cache_readonly uses attribute names similar to
# property (fget) but it does not provide fset and fdel.
cache_readonly = property
# Descriptor exposing an NDFrame axis (e.g. ``index``/``columns``) by number.
class AxisProperty:
    def __init__(self, axis: int = ..., doc: str = ...) -> None: ...
    def __get__(self, obj, typ): ...
    def __set__(self, obj, value) -> None: ...

View File

@ -0,0 +1,3 @@
# Opaque C-implemented sparse location indexes: BlockIndex stores runs of
# filled positions, IntIndex stores explicit integer positions.
class SparseIndex: ...
class BlockIndex(SparseIndex): ...
class IntIndex(SparseIndex): ...

View File

@ -0,0 +1,29 @@
# Public API of ``pandas._libs.tslibs``.
__all__ = [
    "Period",
    "Timestamp",
    "Timedelta",
    "NaT",
    "NaTType",
    "iNaT",
    "nat_strings",
    "BaseOffset",
    "Tick",
    "OutOfBoundsDatetime",
]
from pandas._libs.tslibs.nattype import (
NaT,
NaTType,
iNaT,
nat_strings,
)
from pandas._libs.tslibs.np_datetime import (
OutOfBoundsDatetime as OutOfBoundsDatetime,
OutOfBoundsTimedelta as OutOfBoundsTimedelta,
)
from pandas._libs.tslibs.offsets import (
BaseOffset,
Tick,
)
from pandas._libs.tslibs.period import Period
from pandas._libs.tslibs.timedeltas import Timedelta
from pandas._libs.tslibs.timestamps import Timestamp

View File

@ -0,0 +1,3 @@
from datetime import datetime
# Abstract marker base: lets C code isinstance-check Timestamp without
# importing it; subclasses datetime.
class ABCTimestamp(datetime): ...

View File

@ -0,0 +1 @@
# Raised when a value cannot be represented within Timedelta's bounds.
class OutOfBoundsTimedelta(ValueError): ...

View File

@ -0,0 +1,157 @@
# pyright: strict
from datetime import (
datetime,
timedelta,
tzinfo as _tzinfo,
)
from typing import Literal
import numpy as np
from typing_extensions import (
Self,
TypeAlias,
)
from pandas._libs.tslibs.period import Period
from pandas._typing import (
Frequency,
NpDtype,
TimestampNonexistent,
TimeUnit,
)
# The not-a-time singleton, its integer sentinel, and the strings parsed as NaT.
NaT: NaTType
iNaT: int
nat_strings: set[str]
# Operand types accepted by NaT's ordering comparisons.
_NaTComparisonTypes: TypeAlias = (
    datetime | timedelta | Period | np.datetime64 | np.timedelta64
)

# Callable type assigned to NaT's __lt__/__le__/__gt__/__ge__ slots.
class _NatComparison:
    def __call__(self, other: _NaTComparisonTypes) -> bool: ...
# Type of the ``pd.NaT`` singleton.  It mirrors the Timestamp/Timedelta/
# Period interfaces so NaT can stand in for any of them; numeric accessors
# are typed ``float`` because they evaluate to ``nan``.
class NaTType:
    value: np.int64
    def __hash__(self) -> int: ...
    # NOTE(review): ``asm8`` is declared as a method here but behaves like a
    # property on Timestamp — confirm which is intended.
    def asm8(self) -> np.datetime64: ...
    def to_datetime64(self) -> np.datetime64: ...
    def to_numpy(
        self, dtype: NpDtype | None = ..., copy: bool = ...
    ) -> np.datetime64 | np.timedelta64: ...
    @property
    def is_leap_year(self) -> bool: ...
    @property
    def is_month_start(self) -> bool: ...
    @property
    def is_quarter_start(self) -> bool: ...
    @property
    def is_year_start(self) -> bool: ...
    @property
    def is_month_end(self) -> bool: ...
    @property
    def is_quarter_end(self) -> bool: ...
    @property
    def is_year_end(self) -> bool: ...
    @property
    def day_of_year(self) -> float: ...
    @property
    def dayofyear(self) -> float: ...
    @property
    def days_in_month(self) -> float: ...
    @property
    def daysinmonth(self) -> float: ...
    @property
    def day_of_week(self) -> float: ...
    @property
    def dayofweek(self) -> float: ...
    @property
    def week(self) -> float: ...
    @property
    def weekofyear(self) -> float: ...
    def day_name(self) -> float: ...
    def month_name(self) -> float: ...
    def weekday(self) -> float: ...
    def isoweekday(self) -> float: ...
    def total_seconds(self) -> float: ...
    # Timestamp-mirroring constructors/converters all return NaT itself.
    def today(self, tz: _tzinfo | str | None = None) -> NaTType: ...
    def now(self, tz: _tzinfo | str | None = None) -> NaTType: ...
    def to_pydatetime(self) -> NaTType: ...
    def date(self) -> NaTType: ...
    def round(
        self,
        freq: Frequency,
        ambiguous: bool | Literal["raise"] | NaTType = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> NaTType: ...
    def floor(
        self,
        freq: Frequency,
        ambiguous: bool | Literal["raise"] | NaTType = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> NaTType: ...
    def ceil(
        self,
        freq: Frequency,
        ambiguous: bool | Literal["raise"] | NaTType = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> NaTType: ...
    # NOTE(review): Timestamp.tz_convert accepts a ``tz`` argument; this
    # signature takes none — confirm the omission is intentional.
    def tz_convert(self) -> NaTType: ...
    def tz_localize(
        self,
        tz: _tzinfo | str | None,
        ambiguous: bool | Literal["raise"] | NaTType = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> NaTType: ...
    def replace(
        self,
        year: int | None = ...,
        month: int | None = ...,
        day: int | None = ...,
        hour: int | None = ...,
        minute: int | None = ...,
        second: int | None = ...,
        microsecond: int | None = ...,
        nanosecond: int | None = ...,
        tzinfo: _tzinfo | None = ...,
        fold: int | None = ...,
    ) -> NaTType: ...
    @property
    def year(self) -> float: ...
    @property
    def quarter(self) -> float: ...
    @property
    def month(self) -> float: ...
    @property
    def day(self) -> float: ...
    @property
    def hour(self) -> float: ...
    @property
    def minute(self) -> float: ...
    @property
    def second(self) -> float: ...
    @property
    def millisecond(self) -> float: ...
    @property
    def microsecond(self) -> float: ...
    @property
    def nanosecond(self) -> float: ...
    # inject Timedelta properties
    @property
    def days(self) -> float: ...
    @property
    def microseconds(self) -> float: ...
    @property
    def nanoseconds(self) -> float: ...
    # inject Period properties
    @property
    def qyear(self) -> float: ...
    def __eq__(self, other: object) -> bool: ...
    def __ne__(self, other: object) -> bool: ...
    # Ordering comparisons only accept time-like operands (see _NatComparison).
    __lt__: _NatComparison
    __le__: _NatComparison
    __gt__: _NatComparison
    __ge__: _NatComparison
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit, round_ok: bool = True) -> Self: ...

View File

@ -0,0 +1,2 @@
# Raised when a value cannot be represented within the datetime64/timedelta64
# bounds of the requested resolution.
class OutOfBoundsDatetime(ValueError): ...
class OutOfBoundsTimedelta(ValueError): ...

View File

@ -0,0 +1,255 @@
from collections.abc import Collection
from datetime import (
date,
datetime,
time,
timedelta,
)
from typing import (
Any,
Literal,
TypeVar,
overload,
)
from dateutil.relativedelta import weekday as WeekdayClass
import numpy as np
from pandas import Timestamp
from typing_extensions import Self
from pandas._typing import npt
from pandas.tseries.holiday import AbstractHolidayCalendar
# Preserve concrete subclass types through offset arithmetic.
_DatetimeT = TypeVar("_DatetimeT", bound=datetime)
_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta)

# Raised when an offset cannot be applied to the given value.
class ApplyTypeError(TypeError): ...
# Base class for all DateOffset frequency objects.  Adding/subtracting a
# datetime-like shifts it by the offset; ``n`` is the multiple applied.
class BaseOffset:
    n: int
    def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __ne__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...
    @property
    def kwds(self) -> dict[str, Any]: ...
    @property
    def base(self) -> BaseOffset: ...
    @overload
    def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
    @overload
    def __add__(self, other: _DatetimeT) -> _DatetimeT: ... # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __add__(self, other: date) -> Timestamp: ...
    @overload
    def __add__(self, other: BaseOffset) -> Self: ...
    @overload
    def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ...
    @overload
    def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
    @overload
    def __radd__(self, other: _DatetimeT) -> _DatetimeT: ... # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __radd__(self, other: date) -> Timestamp: ...
    @overload
    def __radd__(self, other: BaseOffset) -> Self: ...
    @overload
    def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ...
    def __sub__(self, other: BaseOffset) -> Self: ...
    @overload
    def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
    @overload
    def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ... # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __rsub__(self, other: date) -> Timestamp: ...
    @overload
    def __rsub__(self, other: BaseOffset) -> Self: ...
    @overload
    def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ...
    # Multiplying by an int scales ``n``.
    @overload
    def __mul__(self, other: np.ndarray) -> np.ndarray: ...
    @overload
    def __mul__(self, other: int) -> Self: ...
    @overload
    def __rmul__(self, other: np.ndarray) -> np.ndarray: ...
    @overload
    def __rmul__(self, other: int) -> Self: ...
    def __neg__(self) -> Self: ...
    def copy(self) -> Self: ...
    @property
    def name(self) -> str: ...
    @property
    def rule_code(self) -> str: ...
    @property
    def freqstr(self) -> str: ...
    # Roll ``dt`` backward/forward to the nearest on-offset date.
    def rollback(self, dt: datetime) -> datetime: ...
    def rollforward(self, dt: datetime) -> datetime: ...
    def is_on_offset(self, dt: datetime) -> bool: ...
    @property
    def nanos(self) -> int: ...
# Offsets constructed from a single frequency string.
class SingleConstructorOffset(BaseOffset): ...

# Fixed-duration offsets (exact multiples of a time unit).
class Tick(SingleConstructorOffset):
    def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
    @property
    def nanos(self) -> int: ...

class Day(Tick): ...
class Hour(Tick): ...
class Minute(Tick): ...
class Second(Tick): ...
class Milli(Tick): ...
class Micro(Tick): ...
class Nano(Tick): ...
# Offset built from dateutil.relativedelta-style keyword arguments.
class RelativeDeltaOffset(BaseOffset):
    def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ...

# Changed from implementation because it is not allowed for `PeriodDtype`
# Weekday-only business-day offset.
class BusinessDay(BaseOffset):
    def __init__(
        self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
    ) -> None: ...
    def __reduce__(self): ...
# Business-hour offset; ``start``/``end`` bound the business day and accept
# one or several "HH:MM" strings or time objects.
class BusinessHour(SingleConstructorOffset):
    def __init__(
        self,
        n: int = ...,
        normalize: bool = ...,
        start: str | time | Collection[str | time] = ...,
        end: str | time | Collection[str | time] = ...,
        offset: timedelta = ...,
    ) -> None: ...

# Base for offsets anchored on a weekday within a month (0=Mon .. 6=Sun).
# NOTE(review): unlike sibling offsets there is no ``normalize`` parameter
# here — confirm the omission is intentional.
class WeekOfMonthMixin(SingleConstructorOffset):
    def __init__(
        self, n: int = ..., weekday: Literal[0, 1, 2, 3, 4, 5, 6] = ...
    ) -> None: ...
# Yearly offsets anchored on ``month``; B-prefixed variants snap to
# business days.
class YearOffset(SingleConstructorOffset):
    def __init__(
        self, n: int = ..., normalize: bool = ..., month: int | None = ...
    ) -> None: ...

class BYearEnd(YearOffset): ...
class BYearBegin(YearOffset): ...
class YearEnd(YearOffset): ...
class YearBegin(YearOffset): ...

# Quarterly offsets anchored on ``startingMonth``.
class QuarterOffset(SingleConstructorOffset):
    def __init__(
        self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ...
    ) -> None: ...

class BQuarterEnd(QuarterOffset): ...
class BQuarterBegin(QuarterOffset): ...
class QuarterEnd(QuarterOffset): ...
class QuarterBegin(QuarterOffset): ...

# Monthly offsets.
class MonthOffset(SingleConstructorOffset): ...
class MonthEnd(MonthOffset): ...
class MonthBegin(MonthOffset): ...
class BusinessMonthEnd(MonthOffset): ...
class BusinessMonthBegin(MonthOffset): ...
# Twice-monthly offsets split at ``day_of_month``.
class SemiMonthOffset(SingleConstructorOffset):
    def __init__(
        self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ...
    ) -> None: ...

class SemiMonthEnd(SemiMonthOffset): ...
class SemiMonthBegin(SemiMonthOffset): ...

# Weekly offset, optionally anchored on a weekday.
class Week(SingleConstructorOffset):
    def __init__(
        self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
    ) -> None: ...

# N-th given weekday of each month (``week`` is 0-based within the month).
class WeekOfMonth(WeekOfMonthMixin):
    def __init__(
        self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
    ) -> None: ...

class LastWeekOfMonth(WeekOfMonthMixin): ...
# 52/53-week fiscal-year (retail) calendar offsets; ``variation`` selects
# the "nearest"/"last" end-of-year rule.
class FY5253Mixin(SingleConstructorOffset):
    def __init__(
        self,
        n: int = ...,
        normalize: bool = ...,
        weekday: int = ...,
        startingMonth: int = ...,
        variation: str = ...,
    ) -> None: ...

class FY5253(FY5253Mixin): ...
class FY5253Quarter(FY5253Mixin): ...

# Easter Sunday offset.
class Easter(SingleConstructorOffset): ...
# Monthly business offset honoring a custom holiday list.
class _CustomBusinessMonth(SingleConstructorOffset):
    def __init__(
        self,
        n: int = ...,
        normalize: bool = ...,
        offset: timedelta = ...,
        holidays: list | None = ...,
    ) -> None: ...

# Business day with custom holidays and/or a numpy/pandas business calendar.
class CustomBusinessDay(BusinessDay):
    def __init__(
        self,
        n: int = ...,
        normalize: bool = ...,
        holidays: list = ...,
        calendar: AbstractHolidayCalendar | np.busdaycalendar = ...,
    ) -> None: ...

# Business hour restricted to custom business days.
class CustomBusinessHour(BusinessHour):
    def __init__(
        self,
        n: int = ...,
        normalize: bool = ...,
        start: str | time | Collection[str | time] = ...,
        end: str | time | Collection[str | time] = ...,
        offset: timedelta = ...,
        holidays: list | None = ...,
    ) -> None: ...

class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
# General-purpose calendar offset.  Plural keywords (``years``, ``months``,
# ...) shift relatively; singular keywords (``year``, ``month``, ...)
# replace the corresponding field.  Keyword-only by design.
class DateOffset(RelativeDeltaOffset):
    def __init__(
        self,
        *,
        n: int = ...,
        normalize: bool = ...,
        years: int = ...,
        months: int = ...,
        weeks: int = ...,
        days: int = ...,
        hours: int = ...,
        minutes: int = ...,
        seconds: int = ...,
        milliseconds: int = ...,
        microseconds: int = ...,
        nanoseconds: int = ...,
        year: int = ...,
        month: int = ...,
        day: int = ...,
        weekday: int | WeekdayClass = ...,
        hour: int = ...,
        minute: int = ...,
        second: int = ...,
        microsecond: int = ...,
        nanosecond: int = ...,
    ) -> None: ...
# Historical short aliases kept for backward compatibility.
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay

View File

@ -0,0 +1,2 @@
# Raised when a datetime string cannot be parsed.
class DateParseError(ValueError):
    def __init__(self, *args, **kwargs) -> None: ...

View File

@ -0,0 +1,232 @@
import datetime
from typing import (
Literal,
overload,
)
import numpy as np
from pandas import (
Index,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
)
from pandas.core.series import (
OffsetSeries,
PeriodSeries,
TimedeltaSeries,
)
from typing_extensions import TypeAlias
from pandas._libs.tslibs import NaTType
from pandas._libs.tslibs.offsets import BaseOffset
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._typing import (
ShapeT,
np_1darray,
np_ndarray,
)
# Raised when Period operands have mismatched frequencies.
class IncompatibleFrequency(ValueError): ...

# Operand types accepted by Period +/- arithmetic.
_PeriodAddSub: TypeAlias = (
    Timedelta | datetime.timedelta | np.timedelta64 | np.int64 | int | BaseOffset
)
# Canonical values of the ``how`` argument for frequency conversion.
_PeriodFreqHow: TypeAlias = Literal[
    "S",
    "E",
    "start",
    "end",
]
# ``Period.to_timestamp`` additionally accepts these spellings.
_PeriodToTimestampHow: TypeAlias = (
    _PeriodFreqHow
    | Literal[
        "Start",
        "Finish",
        "Begin",
        "End",
        "s",
        "e",
        "finish",
        "begin",
    ]
)
# Start/end Timestamp accessors shared by Period and PeriodArray.
class PeriodMixin:
    @property
    def end_time(self) -> Timestamp: ...
    @property
    def start_time(self) -> Timestamp: ...
class Period(PeriodMixin):
    # Stub for the pandas Period scalar (a span of time at a fixed
    # frequency). Overload declaration order below is significant for
    # type checkers and must not be rearranged.
    def __init__(
        self,
        value: (
            Period | str | datetime.datetime | datetime.date | Timestamp | None
        ) = ...,
        freq: str | BaseOffset | None = ...,
        ordinal: int | None = ...,
        year: int | None = ...,
        month: int | None = ...,
        quarter: int | None = ...,
        day: int | None = ...,
        hour: int | None = ...,
        minute: int | None = ...,
        second: int | None = ...,
    ) -> None: ...
    # Arithmetic: the result type depends on the operand type — scalar
    # offsets keep a Period, Period - Period yields an offset, NaT
    # propagates, and index/series operands vectorize.
    @overload
    def __sub__(self, other: _PeriodAddSub) -> Period: ...
    @overload
    def __sub__(self, other: Period) -> BaseOffset: ...
    @overload
    def __sub__(self, other: NaTType) -> NaTType: ...
    @overload
    def __sub__(self, other: PeriodIndex) -> Index: ...
    @overload
    def __sub__(self, other: TimedeltaSeries) -> PeriodSeries: ...
    @overload
    def __sub__(self, other: TimedeltaIndex) -> PeriodIndex: ...
    @overload
    def __add__(self, other: _PeriodAddSub) -> Period: ...
    @overload
    def __add__(self, other: NaTType) -> NaTType: ...
    @overload
    def __add__(self, other: Index) -> PeriodIndex: ...
    @overload
    def __add__(self, other: OffsetSeries | TimedeltaSeries) -> PeriodSeries: ...
    # ignore[misc] here because we know all other comparisons
    # are False, so we use Literal[False]
    @overload
    def __eq__(self, other: Period) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __eq__(self, other: Index) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: PeriodSeries) -> Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: np_ndarray[ShapeT, np.object_]) -> np_ndarray[ShapeT, np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: object) -> Literal[False]: ...
    @overload
    def __ge__(self, other: Period) -> bool: ...
    @overload
    def __ge__(self, other: PeriodIndex) -> np_1darray[np.bool]: ...
    @overload
    def __ge__(self, other: PeriodSeries) -> Series[bool]: ...
    @overload
    def __ge__(
        self, other: np_ndarray[ShapeT, np.object_]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __gt__(self, other: Period) -> bool: ...
    @overload
    def __gt__(self, other: PeriodIndex) -> np_1darray[np.bool]: ...
    @overload
    def __gt__(self, other: PeriodSeries) -> Series[bool]: ...
    @overload
    def __gt__(
        self, other: np_ndarray[ShapeT, np.object_]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __le__(self, other: Period) -> bool: ...
    @overload
    def __le__(self, other: PeriodIndex) -> np_1darray[np.bool]: ...
    @overload
    def __le__(self, other: PeriodSeries) -> Series[bool]: ...
    @overload
    def __le__(
        self, other: np_ndarray[ShapeT, np.object_]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __lt__(self, other: Period) -> bool: ...
    @overload
    def __lt__(self, other: PeriodIndex) -> np_1darray[np.bool]: ...
    @overload
    def __lt__(self, other: PeriodSeries) -> Series[bool]: ...
    @overload
    def __lt__(
        self, other: np_ndarray[ShapeT, np.object_]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    # ignore[misc] here because we know all other comparisons
    # are False, so we use Literal[False]
    @overload
    def __ne__(self, other: Period) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __ne__(self, other: Index) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: PeriodSeries) -> Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: np_ndarray[ShapeT, np.object_]) -> np_ndarray[ShapeT, np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: object) -> Literal[True]: ...
    # Ignored due to indecipherable error from mypy:
    # Forward operator "__add__" is not callable [misc]
    @overload
    def __radd__(self, other: _PeriodAddSub) -> Period: ...  # type: ignore[misc]
    # Real signature is -> PeriodIndex, but conflicts with Index.__add__
    # Changing Index is very hard due to Index inheritance
    # Signatures of "__radd__" of "Period" and "__add__" of "Index"
    # are unsafely overlapping
    @overload
    def __radd__(self, other: Index) -> Index: ...
    @overload
    def __radd__(self, other: TimedeltaSeries) -> PeriodSeries: ...
    @overload
    def __radd__(self, other: NaTType) -> NaTType: ...
    # Read-only calendar-field accessors.
    @property
    def day(self) -> int: ...
    @property
    def dayofweek(self) -> int: ...
    @property
    def dayofyear(self) -> int: ...
    @property
    def daysinmonth(self) -> int: ...
    @property
    def days_in_month(self) -> int: ...
    @property
    def end_time(self) -> Timestamp: ...
    @property
    def freq(self) -> BaseOffset: ...
    @property
    def freqstr(self) -> str: ...
    @property
    def hour(self) -> int: ...
    @property
    def minute(self) -> int: ...
    @property
    def month(self) -> int: ...
    @property
    def quarter(self) -> int: ...
    @property
    def qyear(self) -> int: ...
    @property
    def second(self) -> int: ...
    @property
    def ordinal(self) -> int: ...
    @property
    def is_leap_year(self) -> bool: ...
    @property
    def start_time(self) -> Timestamp: ...
    @property
    def week(self) -> int: ...
    @property
    def weekday(self) -> int: ...
    @property
    def weekofyear(self) -> int: ...
    @property
    def year(self) -> int: ...
    @property
    def day_of_year(self) -> int: ...
    @property
    def day_of_week(self) -> int: ...
    def asfreq(self, freq: str | BaseOffset, how: _PeriodFreqHow = "end") -> Period: ...
    @classmethod
    def now(cls, freq: str | BaseOffset = ...) -> Period: ...
    def strftime(self, fmt: str) -> str: ...
    def to_timestamp(
        self,
        freq: str | BaseOffset | None = ...,
        how: _PeriodToTimestampHow = "S",
    ) -> Timestamp: ...

View File

@ -0,0 +1,406 @@
# pyright: strict
import datetime as dt
from datetime import timedelta
from typing import (
ClassVar,
Literal,
NamedTuple,
overload,
)
import numpy as np
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.series import (
TimedeltaSeries,
TimestampSeries,
)
from typing_extensions import (
Self,
TypeAlias,
)
from pandas._libs.tslibs import (
BaseOffset,
NaTType,
)
from pandas._libs.tslibs.period import Period
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._typing import (
ShapeT,
TimeUnit,
np_1darray,
np_ndarray,
npt,
)
class Components(NamedTuple):
    # Integer field breakdown of a Timedelta; returned by the
    # Timedelta.components property below.
    days: int
    hours: int
    minutes: int
    seconds: int
    milliseconds: int
    microseconds: int
    nanoseconds: int
# This should be kept consistent with the keys in the dict timedelta_abbrevs
# in pandas/_libs/tslibs/timedeltas.pyx
TimeDeltaUnitChoices: TypeAlias = Literal[
    "W",
    "w",
    "D",
    "d",
    "days",
    "day",
    "hours",
    "hour",
    "hr",
    "h",
    "m",
    "minute",
    "min",
    "minutes",
    "s",
    "seconds",
    "sec",
    "second",
    "ms",
    "milliseconds",
    "millisecond",
    "milli",
    "millis",
    "us",
    "microseconds",
    "microsecond",
    "µs",
    "micro",
    "micros",
    "ns",
    "nanoseconds",
    "nano",
    "nanos",
    "nanosecond",
]
# Fixed-size timedelta units plus calendar units (years/months) that some
# APIs additionally accept.
UnitChoices: TypeAlias = (
    TimeDeltaUnitChoices
    | Literal[
        "Y",
        "y",
        "M",
    ]
)
class Timedelta(timedelta):
    # Stub for the pandas Timedelta scalar. Subclasses datetime.timedelta,
    # so several operators carry "type: ignore[override]" where pandas
    # accepts more operand types than the supertype. Overload order is
    # significant for type checkers and must not be rearranged.
    min: ClassVar[Timedelta]  # pyright: ignore[reportIncompatibleVariableOverride]
    max: ClassVar[Timedelta]  # pyright: ignore[reportIncompatibleVariableOverride]
    resolution: ClassVar[  # pyright: ignore[reportIncompatibleVariableOverride]
        Timedelta
    ]
    # Integer representation of the duration — presumably nanoseconds;
    # TODO confirm against the runtime implementation.
    value: int
    def __new__(
        cls,
        value: str | float | Timedelta | timedelta | np.timedelta64 = ...,
        unit: TimeDeltaUnitChoices = ...,
        *,
        days: float | np.integer | np.floating = ...,
        seconds: float | np.integer | np.floating = ...,
        microseconds: float | np.integer | np.floating = ...,
        milliseconds: float | np.integer | np.floating = ...,
        minutes: float | np.integer | np.floating = ...,
        hours: float | np.integer | np.floating = ...,
        weeks: float | np.integer | np.floating = ...,
    ) -> Self: ...
    # GH 46171
    # While Timedelta can return pd.NaT, having the constructor return
    # a Union with NaTType makes things awkward for users of pandas
    @property
    def days(self) -> int: ...
    @property
    def nanoseconds(self) -> int: ...
    @property
    def seconds(self) -> int: ...
    @property
    def microseconds(self) -> int: ...
    def total_seconds(self) -> float: ...
    def to_pytimedelta(self) -> timedelta: ...
    def to_timedelta64(self) -> np.timedelta64: ...
    @property
    def asm8(self) -> np.timedelta64: ...
    # TODO: round/floor/ceil could return NaT?
    def round(self, freq: str | BaseOffset) -> Self: ...
    def floor(self, freq: str | BaseOffset) -> Self: ...
    def ceil(self, freq: str | BaseOffset) -> Self: ...
    @property
    def resolution_string(self) -> str: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __add__(self, other: timedelta | Timedelta | np.timedelta64) -> Timedelta: ...
    @overload
    def __add__(self, other: dt.datetime | np.datetime64 | Timestamp) -> Timestamp: ...
    @overload
    def __add__(self, other: NaTType) -> NaTType: ...
    @overload
    def __add__(self, other: Period) -> Period: ...
    @overload
    def __add__(self, other: dt.date) -> dt.date: ...
    @overload
    def __add__(self, other: PeriodIndex) -> PeriodIndex: ...
    @overload
    def __add__(self, other: DatetimeIndex) -> DatetimeIndex: ...
    @overload
    def __add__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __add__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.datetime64]: ...
    @overload
    def __add__(self, other: pd.TimedeltaIndex) -> pd.TimedeltaIndex: ...
    @overload
    def __add__(
        self,
        other: TimedeltaSeries,
    ) -> TimedeltaSeries: ...
    @overload
    def __add__(self, other: TimestampSeries) -> TimestampSeries: ...
    @overload
    def __radd__(self, other: np.datetime64) -> Timestamp: ...
    @overload
    def __radd__(self, other: timedelta | Timedelta | np.timedelta64) -> Timedelta: ...
    @overload
    def __radd__(self, other: NaTType) -> NaTType: ...
    @overload
    def __radd__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __radd__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.datetime64]: ...
    @overload
    def __radd__(self, other: pd.TimedeltaIndex) -> pd.TimedeltaIndex: ...
    @overload
    def __radd__(self, other: pd.PeriodIndex) -> pd.PeriodIndex: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __sub__(self, other: timedelta | Timedelta | np.timedelta64) -> Timedelta: ...
    @overload
    def __sub__(self, other: NaTType) -> NaTType: ...
    @overload
    def __sub__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __sub__(self, other: pd.TimedeltaIndex) -> TimedeltaIndex: ...
    @overload
    def __sub__(
        self, other: TimedeltaSeries | Series[pd.Timedelta]
    ) -> TimedeltaSeries: ...
    @overload
    def __rsub__(self, other: timedelta | Timedelta | np.timedelta64) -> Timedelta: ...
    @overload
    def __rsub__(self, other: dt.datetime | Timestamp | np.datetime64) -> Timestamp: ...  # type: ignore[misc]
    @overload
    def __rsub__(self, other: NaTType) -> NaTType: ...
    @overload
    def __rsub__(self, other: Period) -> Period: ...
    @overload
    def __rsub__(self, other: PeriodIndex) -> PeriodIndex: ...
    @overload
    def __rsub__(self, other: DatetimeIndex) -> DatetimeIndex: ...
    @overload
    def __rsub__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.datetime64]: ...
    @overload
    def __rsub__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __rsub__(self, other: pd.TimedeltaIndex) -> pd.TimedeltaIndex: ...
    def __neg__(self) -> Timedelta: ...
    def __pos__(self) -> Timedelta: ...
    def __abs__(self) -> Timedelta: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __mul__(self, other: float) -> Timedelta: ...
    @overload
    def __mul__(
        self, other: np_ndarray[ShapeT, np.integer] | np_ndarray[ShapeT, np.floating]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __mul__(self, other: Series[int]) -> TimedeltaSeries: ...
    @overload
    def __mul__(self, other: Series[float]) -> TimedeltaSeries: ...
    @overload
    def __mul__(self, other: Index[int] | Index[float]) -> TimedeltaIndex: ...
    @overload
    def __rmul__(self, other: float) -> Timedelta: ...
    @overload
    def __rmul__(
        self, other: np_ndarray[ShapeT, np.floating] | np_ndarray[ShapeT, np.integer]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __rmul__(self, other: Series[int]) -> TimedeltaSeries: ...
    @overload
    def __rmul__(self, other: Series[float]) -> TimedeltaSeries: ...
    # maybe related to https://github.com/python/mypy/issues/10755
    @overload
    def __rmul__(self, other: Index[int] | Index[float]) -> TimedeltaIndex: ...
    # Override due to more types supported than dt.timedelta
    # error: Signature of "__floordiv__" incompatible with supertype "timedelta"
    @overload  # type: ignore[override]
    def __floordiv__(self, other: timedelta | Timedelta | np.timedelta64) -> int: ...
    @overload
    def __floordiv__(self, other: float) -> Timedelta: ...
    @overload
    def __floordiv__(
        self, other: np_ndarray[ShapeT, np.integer] | np_ndarray[ShapeT, np.floating]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __floordiv__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.int_]: ...
    @overload
    def __floordiv__(self, other: Index[int] | Index[float]) -> TimedeltaIndex: ...
    @overload
    def __floordiv__(self, other: Series[int]) -> TimedeltaSeries: ...
    @overload
    def __floordiv__(self, other: Series[float]) -> TimedeltaSeries: ...
    @overload
    def __floordiv__(self, other: TimedeltaSeries) -> Series[int]: ...
    @overload
    def __floordiv__(self, other: NaTType | None) -> float: ...
    @overload
    def __rfloordiv__(self, other: timedelta | Timedelta | str) -> int: ...
    @overload
    def __rfloordiv__(self, other: NaTType | None) -> float: ...
    @overload
    def __rfloordiv__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.int_]: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __truediv__(self, other: timedelta | Timedelta | NaTType) -> float: ...
    @overload
    def __truediv__(self, other: float) -> Timedelta: ...
    @overload
    def __truediv__(
        self, other: np_ndarray[ShapeT, np.integer] | np_ndarray[ShapeT, np.floating]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    @overload
    def __truediv__(self, other: TimedeltaSeries) -> Series[float]: ...
    @overload
    def __truediv__(self, other: Series[int]) -> TimedeltaSeries: ...
    @overload
    def __truediv__(self, other: Series[float]) -> TimedeltaSeries: ...
    @overload
    def __truediv__(self, other: Index[int] | Index[float]) -> TimedeltaIndex: ...
    def __rtruediv__(self, other: timedelta | Timedelta | NaTType) -> float: ...
    # Override due to more types supported than dt.timedelta
    @overload
    def __eq__(self, other: timedelta | Timedelta | np.timedelta64) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __eq__(self, other: TimedeltaSeries | Series[pd.Timedelta]) -> Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: Index) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(  # type: ignore[overload-overlap]
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.bool_]: ...
    @overload
    def __eq__(self, other: object) -> Literal[False]: ...
    # Override due to more types supported than dt.timedelta
    @overload
    def __ne__(self, other: timedelta | Timedelta | np.timedelta64) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __ne__(self, other: TimedeltaSeries | Series[pd.Timedelta]) -> Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: Index) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(  # type: ignore[overload-overlap]
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.bool_]: ...
    @overload
    def __ne__(self, other: object) -> Literal[True]: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __mod__(self, other: timedelta) -> Timedelta: ...
    @overload
    def __mod__(self, other: float) -> Timedelta: ...
    @overload
    def __mod__(self, other: Series[int] | Series[float]) -> TimedeltaSeries: ...
    @overload
    def __mod__(self, other: Index[int] | Index[float]) -> TimedeltaIndex: ...
    @overload
    def __mod__(
        self, other: np_ndarray[ShapeT, np.integer] | np_ndarray[ShapeT, np.floating]
    ) -> np_ndarray[ShapeT, np.timedelta64]: ...
    # NOTE(review): partially shadowed by the Series[int] | Series[float]
    # overload above; only the TimedeltaSeries member adds coverage here.
    @overload
    def __mod__(
        self, other: Series[int] | Series[float] | TimedeltaSeries
    ) -> TimedeltaSeries: ...
    def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ...
    # Mypy complains Forward operator "<inequality op>" is not callable, so ignore misc
    # for le, lt ge and gt
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __le__(self, other: timedelta | Timedelta | np.timedelta64) -> bool: ...  # type: ignore[misc]
    @overload
    def __le__(self, other: TimedeltaIndex) -> np_1darray[np.bool]: ...
    @overload
    def __le__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.bool_]: ...
    @overload
    def __le__(self, other: TimedeltaSeries | Series[pd.Timedelta]) -> Series[bool]: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __lt__(self, other: timedelta | Timedelta | np.timedelta64) -> bool: ...  # type: ignore[misc]
    @overload
    def __lt__(self, other: TimedeltaIndex) -> np_1darray[np.bool]: ...
    @overload
    def __lt__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.bool_]: ...
    @overload
    def __lt__(self, other: TimedeltaSeries | Series[pd.Timedelta]) -> Series[bool]: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __ge__(self, other: timedelta | Timedelta | np.timedelta64) -> bool: ...  # type: ignore[misc]
    @overload
    def __ge__(self, other: TimedeltaIndex) -> np_1darray[np.bool]: ...
    @overload
    def __ge__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.bool_]: ...
    @overload
    def __ge__(self, other: TimedeltaSeries | Series[pd.Timedelta]) -> Series[bool]: ...
    # Override due to more types supported than dt.timedelta
    @overload  # type: ignore[override]
    def __gt__(self, other: timedelta | Timedelta | np.timedelta64) -> bool: ...  # type: ignore[misc]
    @overload
    def __gt__(self, other: TimedeltaIndex) -> np_1darray[np.bool]: ...
    @overload
    def __gt__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.bool_]: ...
    @overload
    def __gt__(self, other: TimedeltaSeries | Series[pd.Timedelta]) -> Series[bool]: ...
    def __hash__(self) -> int: ...
    def isoformat(self) -> str: ...
    def to_numpy(self) -> np.timedelta64: ...
    @property
    def components(self) -> Components: ...
    def view(self, dtype: npt.DTypeLike = ...) -> object: ...
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit, round_ok: bool = True) -> Self: ...

View File

@ -0,0 +1,352 @@
# pyright: strict
from datetime import (
date as _date,
datetime,
time as _time,
timedelta,
tzinfo as _tzinfo,
)
from datetime import _IsoCalendarDate # pyright: ignore[reportPrivateUsage]
import sys
from time import struct_time
from typing import (
ClassVar,
Literal,
SupportsIndex,
overload,
)
import numpy as np
from pandas import (
DatetimeIndex,
TimedeltaIndex,
)
from pandas.core.indexes.base import Index
from pandas.core.series import (
Series,
TimedeltaSeries,
TimestampSeries,
)
from typing_extensions import (
Never,
Self,
TypeAlias,
)
from pandas._libs.tslibs import (
BaseOffset,
Period,
Tick,
Timedelta,
)
from pandas._typing import (
ShapeT,
TimestampNonexistent,
TimeUnit,
np_1darray,
np_ndarray,
)
# Accepted values for the `ambiguous` argument of Timestamp.tz_localize /
# round / floor / ceil below.
_Ambiguous: TypeAlias = bool | Literal["raise", "NaT"]
# Repeated from `_typing.pyi` so as to satisfy mixed strict / non-strict paths.
# https://github.com/pandas-dev/pandas-stubs/pull/1151#issuecomment-2715130190
TimeZones: TypeAlias = str | _tzinfo | None | int
class Timestamp(datetime, SupportsIndex):
    # Stub for the pandas Timestamp scalar. Subclasses datetime.datetime,
    # so several operators carry "type: ignore[override]" where pandas
    # accepts more operand types than the supertype. Overload order is
    # significant for type checkers and must not be rearranged.
    min: ClassVar[Timestamp]  # pyright: ignore[reportIncompatibleVariableOverride]
    max: ClassVar[Timestamp]  # pyright: ignore[reportIncompatibleVariableOverride]
    resolution: ClassVar[  # pyright: ignore[reportIncompatibleVariableOverride]
        Timedelta
    ]
    # Integer representation of the point in time — presumably nanoseconds
    # since the epoch; TODO confirm against the runtime implementation.
    value: int
    def __new__(
        cls,
        ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
        year: int | None = ...,
        month: int | None = ...,
        day: int | None = ...,
        hour: int | None = ...,
        minute: int | None = ...,
        second: int | None = ...,
        microsecond: int | None = ...,
        tzinfo: _tzinfo | None = ...,
        *,
        nanosecond: int | None = ...,
        tz: TimeZones = ...,
        unit: str | int | None = ...,
        fold: Literal[0, 1] | None = ...,
    ) -> Self: ...
    # GH 46171
    # While Timestamp can return pd.NaT, having the constructor return
    # a Union with NaTType makes things awkward for users of pandas
    @property
    def year(self) -> int: ...
    @property
    def month(self) -> int: ...
    @property
    def day(self) -> int: ...
    @property
    def hour(self) -> int: ...
    @property
    def minute(self) -> int: ...
    @property
    def second(self) -> int: ...
    @property
    def microsecond(self) -> int: ...
    @property
    def nanosecond(self) -> int: ...
    @property
    def tzinfo(self) -> _tzinfo | None: ...
    @property
    def tz(self) -> _tzinfo | None: ...
    @property
    def fold(self) -> int: ...
    if sys.version_info >= (3, 12):
        @classmethod
        def fromtimestamp(  # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore
            cls, t: float, tz: _tzinfo | str | None = ...
        ) -> Self: ...
    else:
        @classmethod
        def fromtimestamp(cls, t: float, tz: _tzinfo | str | None = ...) -> Self: ...
    @classmethod
    def utcfromtimestamp(cls, ts: float) -> Self: ...
    @classmethod
    def today(cls, tz: _tzinfo | str | None = None) -> Self: ...
    @classmethod
    def fromordinal(
        cls,
        ordinal: int,
        tz: _tzinfo | str | None = ...,
    ) -> Self: ...
    @classmethod
    def now(cls, tz: _tzinfo | str | None = None) -> Self: ...
    @classmethod
    def utcnow(cls) -> Self: ...
    # error: Signature of "combine" incompatible with supertype "datetime"
    @classmethod
    def combine(cls, date: _date, time: _time) -> Self: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    @classmethod
    def fromisoformat(cls, date_string: str) -> Self: ...
    def strftime(self, format: str) -> str: ...
    def __format__(self, fmt: str) -> str: ...
    def toordinal(self) -> int: ...
    def timetuple(self) -> struct_time: ...
    def timestamp(self) -> float: ...
    def utctimetuple(self) -> struct_time: ...
    def date(self) -> _date: ...
    def time(self) -> _time: ...
    def timetz(self) -> _time: ...
    # Override since fold is more precise than datetime.replace(fold:int)
    # Here it is restricted to be 0 or 1 using a Literal
    # Violation of Liskov substitution principle
    def replace(  # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore
        self,
        year: int | None = ...,
        month: int | None = ...,
        day: int | None = ...,
        hour: int | None = ...,
        minute: int | None = ...,
        second: int | None = ...,
        microsecond: int | None = ...,
        tzinfo: _tzinfo | None = ...,
        fold: Literal[0, 1] | None = ...,
    ) -> Timestamp: ...
    def astimezone(self, tz: _tzinfo | None = ...) -> Self: ...
    def ctime(self) -> str: ...
    def isoformat(  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
        self,
        sep: str = "T",
        timespec: Literal[
            "auto",
            "hours",
            "minutes",
            "seconds",
            "milliseconds",
            "microseconds",
            "nanoseconds",
        ] = "auto",
    ) -> str: ...
    # Never-typed: strptime is deliberately unusable on Timestamp in these
    # stubs (both arguments are Never).
    @classmethod
    def strptime(cls, date_string: Never, format: Never) -> Never: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def utcoffset(self) -> timedelta | None: ...
    def tzname(self) -> str | None: ...
    def dst(self) -> timedelta | None: ...
    # Mypy complains Forward operator "<inequality op>" is not callable, so ignore misc
    # for le, lt ge and gt
    @overload  # type: ignore[override]
    def __le__(self, other: Timestamp | datetime | np.datetime64) -> bool: ...  # type: ignore[misc]
    @overload
    def __le__(self, other: DatetimeIndex) -> np_1darray[np.bool]: ...
    @overload
    def __le__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __le__(self, other: TimestampSeries) -> Series[bool]: ...
    @overload  # type: ignore[override]
    def __lt__(self, other: Timestamp | datetime | np.datetime64) -> bool: ...  # type: ignore[misc]
    @overload
    def __lt__(self, other: DatetimeIndex) -> np_1darray[np.bool]: ...
    @overload
    def __lt__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __lt__(self, other: TimestampSeries) -> Series[bool]: ...
    @overload  # type: ignore[override]
    def __ge__(self, other: Timestamp | datetime | np.datetime64) -> bool: ...  # type: ignore[misc]
    @overload
    def __ge__(self, other: DatetimeIndex) -> np_1darray[np.bool]: ...
    @overload
    def __ge__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __ge__(self, other: TimestampSeries) -> Series[bool]: ...
    @overload  # type: ignore[override]
    def __gt__(self, other: Timestamp | datetime | np.datetime64) -> bool: ...  # type: ignore[misc]
    @overload
    def __gt__(self, other: DatetimeIndex) -> np_1darray[np.bool]: ...
    @overload
    def __gt__(
        self, other: np_ndarray[ShapeT, np.datetime64]
    ) -> np_ndarray[ShapeT, np.bool]: ...
    @overload
    def __gt__(self, other: TimestampSeries) -> Series[bool]: ...
    # error: Signature of "__add__" incompatible with supertype "date"/"datetime"
    @overload  # type: ignore[override]
    def __add__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.datetime64]: ...
    @overload
    def __add__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
    @overload
    def __add__(self, other: TimedeltaSeries) -> TimestampSeries: ...
    @overload
    def __add__(self, other: TimedeltaIndex) -> DatetimeIndex: ...
    @overload
    def __radd__(self, other: timedelta) -> Self: ...
    @overload
    def __radd__(self, other: TimedeltaIndex) -> DatetimeIndex: ...
    @overload
    def __radd__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.datetime64]: ...
    # TODO: test dt64
    @overload  # type: ignore[override]
    def __sub__(self, other: Timestamp | datetime | np.datetime64) -> Timedelta: ...
    @overload
    def __sub__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
    @overload
    def __sub__(self, other: TimedeltaIndex) -> DatetimeIndex: ...
    @overload
    def __sub__(self, other: TimedeltaSeries) -> TimestampSeries: ...
    @overload
    def __sub__(self, other: TimestampSeries) -> TimedeltaSeries: ...
    @overload
    def __sub__(
        self, other: np_ndarray[ShapeT, np.timedelta64]
    ) -> np_ndarray[ShapeT, np.datetime64]: ...
    @overload
    def __eq__(self, other: Timestamp | datetime | np.datetime64) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __eq__(self, other: TimestampSeries) -> Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: Index) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: np_ndarray[ShapeT, np.datetime64]) -> np_ndarray[ShapeT, np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(self, other: object) -> Literal[False]: ...
    @overload
    def __ne__(self, other: Timestamp | datetime | np.datetime64) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __ne__(self, other: TimestampSeries) -> Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: Index) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: np_ndarray[ShapeT, np.datetime64]) -> np_ndarray[ShapeT, np.bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(self, other: object) -> Literal[True]: ...
    def __hash__(self) -> int: ...
    def weekday(self) -> int: ...
    def isoweekday(self) -> int: ...
    def isocalendar(self) -> _IsoCalendarDate: ...
    @property
    def is_leap_year(self) -> bool: ...
    @property
    def is_month_start(self) -> bool: ...
    @property
    def is_quarter_start(self) -> bool: ...
    @property
    def is_year_start(self) -> bool: ...
    @property
    def is_month_end(self) -> bool: ...
    @property
    def is_quarter_end(self) -> bool: ...
    @property
    def is_year_end(self) -> bool: ...
    def to_pydatetime(self, warn: bool = ...) -> datetime: ...
    def to_datetime64(self) -> np.datetime64: ...
    def to_period(self, freq: BaseOffset | str | None = ...) -> Period: ...
    def to_julian_date(self) -> np.float64: ...
    @property
    def asm8(self) -> np.datetime64: ...
    def tz_convert(self, tz: TimeZones) -> Self: ...
    # TODO: could return NaT?
    def tz_localize(
        self,
        tz: TimeZones,
        ambiguous: _Ambiguous = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> Self: ...
    def normalize(self) -> Self: ...
    # TODO: round/floor/ceil could return NaT?
    def round(
        self,
        freq: str,
        ambiguous: _Ambiguous = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> Self: ...
    def floor(
        self,
        freq: str,
        ambiguous: _Ambiguous = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> Self: ...
    def ceil(
        self,
        freq: str,
        ambiguous: _Ambiguous = "raise",
        nonexistent: TimestampNonexistent = "raise",
    ) -> Self: ...
    def day_name(self, locale: str | None = None) -> str: ...
    def month_name(self, locale: str | None = None) -> str: ...
    @property
    def day_of_week(self) -> int: ...
    @property
    def dayofweek(self) -> int: ...
    @property
    def day_of_year(self) -> int: ...
    @property
    def dayofyear(self) -> int: ...
    @property
    def weekofyear(self) -> int: ...
    @property
    def quarter(self) -> int: ...
    @property
    def week(self) -> int: ...
    def to_numpy(self) -> np.datetime64: ...
    @property
    def days_in_month(self) -> int: ...
    @property
    def daysinmonth(self) -> int: ...
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit, round_ok: bool = True) -> Self: ...
    # To support slicing
    def __index__(self) -> int: ...

View File

@ -0,0 +1,192 @@
from collections.abc import (
Container,
Generator,
Iterable,
)
from contextlib import contextmanager
from typing import (
Literal,
overload,
)
import warnings
from matplotlib.artist import Artist
import numpy as np
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
from pandas.arrays import (
DatetimeArray,
IntervalArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
from pandas.core.arrays.base import ExtensionArray
from pandas._typing import (
AnyArrayLike,
T,
)
# Assert two objects are equal within rtol/atol tolerances.
def assert_almost_equal(
    left: T,
    right: T,
    check_dtype: bool | Literal["equiv"] = "equiv",
    rtol: float = 1e-5,
    atol: float = 1e-8,
    **kwargs,
) -> None: ...
# Assert two dicts are equal; compare_keys=False skips key-set comparison.
def assert_dict_equal(left: dict, right: dict, compare_keys: bool = True) -> None: ...
# Assert two Index objects are equal, with per-aspect check flags.
def assert_index_equal(
    left: Index,
    right: Index,
    exact: bool | Literal["equiv"] = "equiv",
    check_names: bool = True,
    check_exact: bool = True,
    check_categorical: bool = True,
    check_order: bool = True,
    rtol: float = 1e-5,
    atol: float = 1e-8,
    obj: str = "Index",
) -> None: ...
# Assert both objects have the same class (or an equivalent one).
def assert_class_equal(
    left: T, right: T, exact: bool | Literal["equiv"] = True, obj: str = "Input"
) -> None: ...
# Assert the named attribute is equal on both objects.
def assert_attr_equal(
    attr: str, left: object, right: object, obj: str = "Attributes"
) -> None: ...
# Assert the value is an acceptable return type for a pandas plot call.
def assert_is_valid_plot_return_object(
    objs: Series | np.ndarray | Artist | tuple | dict,
) -> None: ...
# Assert the array-like is monotonically sorted.
def assert_is_sorted(seq: AnyArrayLike) -> None: ...
# Assert two Categorical objects are equal (optionally including
# category order and dtype).
def assert_categorical_equal(
    left: Categorical,
    right: Categorical,
    check_dtype: bool = True,
    check_category_order: bool = True,
    obj: str = "Categorical",
) -> None: ...
# Assert two IntervalArray objects are equal.
def assert_interval_array_equal(
    left: IntervalArray,
    right: IntervalArray,
    exact: bool | Literal["equiv"] = "equiv",
    obj: str = "IntervalArray",
) -> None: ...
# Assert two PeriodArray objects are equal.
def assert_period_array_equal(
    left: PeriodArray, right: PeriodArray, obj: str = "PeriodArray"
) -> None: ...
# Assert two DatetimeArray objects are equal (optionally comparing freq).
def assert_datetime_array_equal(
    left: DatetimeArray,
    right: DatetimeArray,
    obj: str = "DatetimeArray",
    check_freq: bool = True,
) -> None: ...
# Assert two TimedeltaArray objects are equal (optionally comparing freq).
def assert_timedelta_array_equal(
    left: TimedeltaArray,
    right: TimedeltaArray,
    obj: str = "TimedeltaArray",
    check_freq: bool = True,
) -> None: ...
# Assert two numpy arrays are equal; left/right are deliberately untyped
# here. check_same distinguishes "must be a copy" from "must be the same
# object".
def assert_numpy_array_equal(
    left,
    right,
    strict_nan: bool = False,
    check_dtype: bool | Literal["equiv"] = True,
    err_msg: str | None = None,
    check_same: Literal["copy", "same"] | None = None,
    obj: str = "numpy array",
    index_values: Index | np.ndarray | None = None,
) -> None: ...
# Assert two ExtensionArray objects are equal within rtol/atol tolerances.
def assert_extension_array_equal(
    left: ExtensionArray,
    right: ExtensionArray,
    check_dtype: bool | Literal["equiv"] = True,
    index_values: Index | np.ndarray | None = None,
    check_exact: bool = False,
    rtol: float = 1e-5,
    atol: float = 1e-8,
    obj: str = "ExtensionArray",
) -> None: ...
# Assert two Series are equal. The two overloads encode that `check_like`
# may only be set when `check_index=True` (first overload pins
# check_index=False and forces check_like=False).
@overload
def assert_series_equal(
    left: Series,
    right: Series,
    check_dtype: bool | Literal["equiv"] = ...,
    check_index_type: bool | Literal["equiv"] = ...,
    check_series_type: bool = ...,
    check_names: bool = ...,
    check_exact: bool = ...,
    check_datetimelike_compat: bool = ...,
    check_categorical: bool = ...,
    check_category_order: bool = ...,
    check_freq: bool = ...,
    check_flags: bool = ...,
    rtol: float = ...,
    atol: float = ...,
    obj: str = ...,
    *,
    check_index: Literal[False],
    check_like: Literal[False] = ...,
) -> None: ...
@overload
def assert_series_equal(
    left: Series,
    right: Series,
    check_dtype: bool | Literal["equiv"] = ...,
    check_index_type: bool | Literal["equiv"] = ...,
    check_series_type: bool = ...,
    check_names: bool = ...,
    check_exact: bool = ...,
    check_datetimelike_compat: bool = ...,
    check_categorical: bool = ...,
    check_category_order: bool = ...,
    check_freq: bool = ...,
    check_flags: bool = ...,
    rtol: float = ...,
    atol: float = ...,
    obj: str = ...,
    *,
    check_index: Literal[True] = ...,
    check_like: bool = ...,
) -> None: ...
# Stub: assert two DataFrames are equal. `by_blocks` compares by internal
# block layout; `check_like` ignores row/column ordering.
def assert_frame_equal(
    left: DataFrame,
    right: DataFrame,
    check_dtype: bool | Literal["equiv"] = True,
    check_index_type: bool | Literal["equiv"] = "equiv",
    check_column_type: bool | Literal["equiv"] = "equiv",
    check_frame_type: bool = True,
    check_names: bool = True,
    by_blocks: bool = False,
    check_exact: bool = False,
    check_datetimelike_compat: bool = False,
    check_categorical: bool = True,
    check_like: bool = False,
    check_freq: bool = True,
    check_flags: bool = True,
    rtol: float = 1e-5,
    atol: float = 1e-8,
    obj: str = "DataFrame",
) -> None: ...
# Stub: generic dispatcher — compares any pair of pandas/numpy objects,
# forwarding **kwargs to the type-specific assert_*_equal helper.
def assert_equal(left, right, **kwargs) -> None: ...
# Stub: assert two SparseArrays are equal.
def assert_sp_array_equal(left: SparseArray, right: SparseArray) -> None: ...
# Stub: assert every element of *iterable* is present in *dic*.
def assert_contains_all(iterable: Iterable[T], dic: Container[T]) -> None: ...
# Stub: assert elementwise equality of two iterables while verifying the
# elements are copies (not the same objects).
def assert_copy(iter1: Iterable[T], iter2: Iterable[T], **eql_kwargs) -> None: ...
# Stub: context manager asserting that the enclosed block emits the expected
# warning(s). Yields the captured warnings list; `expected_warning=False`
# asserts that NO warning is raised, `match` regex-checks the message.
@contextmanager
def assert_produces_warning(
    expected_warning: (
        type[Warning] | Literal[False] | tuple[type[Warning], ...] | None
    ) = ...,
    filter_level: Literal[
        "error", "ignore", "always", "default", "module", "once"
    ] = "always",
    check_stacklevel: bool = True,
    raise_on_extra_warnings: bool = True,
    match: str | None = None,
) -> Generator[list[warnings.WarningMessage], None, None]: ...
# Stub: context manager yielding a temporary file path that is removed
# on exit; `filename` optionally seeds the generated name.
@contextmanager
def ensure_clean(filename: str | None = None) -> Generator[str, None, None]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,5 @@
from typing import Literal
# JSON blob describing the upstream pandas version this stub package targets.
version_json: str = ...
# Version of the pandas-stubs distribution itself, pinned as a Literal.
_stub_version: Literal["2.3.2.250827"]

View File

@ -0,0 +1,7 @@
from pandas.api import (
extensions as extensions,
indexers as indexers,
interchange as interchange,
types as types,
typing as typing,
)

View File

@ -0,0 +1,17 @@
from pandas.core.accessor import (
register_dataframe_accessor as register_dataframe_accessor,
register_index_accessor as register_index_accessor,
register_series_accessor as register_series_accessor,
)
from pandas.core.algorithms import take as take
from pandas.core.arrays import (
ExtensionArray as ExtensionArray,
ExtensionScalarOpsMixin as ExtensionScalarOpsMixin,
)
from pandas._libs.lib import no_default as no_default
from pandas.core.dtypes.base import (
ExtensionDtype as ExtensionDtype,
register_extension_dtype as register_extension_dtype,
)

View File

@ -0,0 +1,6 @@
from pandas.core.indexers import check_array_indexer as check_array_indexer
from pandas.core.indexers.objects import (
BaseIndexer as BaseIndexer,
FixedForwardWindowIndexer as FixedForwardWindowIndexer,
VariableOffsetWindowIndexer as VariableOffsetWindowIndexer,
)

View File

@ -0,0 +1,2 @@
from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrame
from pandas.core.interchange.from_dataframe import from_dataframe as from_dataframe

View File

@ -0,0 +1,43 @@
from pandas._libs.lib import infer_dtype as infer_dtype
from pandas.core.dtypes.api import (
is_any_real_numeric_dtype as is_any_real_numeric_dtype,
is_bool as is_bool,
is_bool_dtype as is_bool_dtype,
is_complex as is_complex,
is_complex_dtype as is_complex_dtype,
is_datetime64_any_dtype as is_datetime64_any_dtype,
is_datetime64_dtype as is_datetime64_dtype,
is_datetime64_ns_dtype as is_datetime64_ns_dtype,
is_dict_like as is_dict_like,
is_dtype_equal as is_dtype_equal,
is_extension_array_dtype as is_extension_array_dtype,
is_file_like as is_file_like,
is_float as is_float,
is_float_dtype as is_float_dtype,
is_hashable as is_hashable,
is_integer as is_integer,
is_integer_dtype as is_integer_dtype,
is_iterator as is_iterator,
is_list_like as is_list_like,
is_named_tuple as is_named_tuple,
is_number as is_number,
is_numeric_dtype as is_numeric_dtype,
is_object_dtype as is_object_dtype,
is_re as is_re,
is_re_compilable as is_re_compilable,
is_scalar as is_scalar,
is_signed_integer_dtype as is_signed_integer_dtype,
is_string_dtype as is_string_dtype,
is_timedelta64_dtype as is_timedelta64_dtype,
is_timedelta64_ns_dtype as is_timedelta64_ns_dtype,
is_unsigned_integer_dtype as is_unsigned_integer_dtype,
pandas_dtype as pandas_dtype,
)
from pandas.core.dtypes.concat import union_categoricals as union_categoricals
from pandas.core.dtypes.dtypes import (
CategoricalDtype as CategoricalDtype,
DatetimeTZDtype as DatetimeTZDtype,
IntervalDtype as IntervalDtype,
PeriodDtype as PeriodDtype,
)

View File

@ -0,0 +1,31 @@
from pandas.core.groupby import (
DataFrameGroupBy as DataFrameGroupBy,
SeriesGroupBy as SeriesGroupBy,
)
from pandas.core.indexes.frozen import FrozenList as FrozenList
from pandas.core.resample import (
DatetimeIndexResamplerGroupby as DatetimeIndexResamplerGroupby,
PeriodIndexResamplerGroupby as PeriodIndexResamplerGroupby,
Resampler as Resampler,
TimedeltaIndexResamplerGroupby as TimedeltaIndexResamplerGroupby,
TimeGrouper as TimeGrouper,
)
from pandas.core.window import (
Expanding as Expanding,
ExpandingGroupby as ExpandingGroupby,
ExponentialMovingWindow as ExponentialMovingWindow,
ExponentialMovingWindowGroupby as ExponentialMovingWindowGroupby,
Rolling as Rolling,
RollingGroupby as RollingGroupby,
Window as Window,
)
from pandas._libs import NaTType as NaTType
from pandas._libs.lib import _NoDefaultDoNotUse as _NoDefaultDoNotUse
from pandas._libs.missing import NAType as NAType
from pandas.io.json._json import JsonReader as JsonReader
# SASReader is not defined so commenting it out for now
# from pandas.io.sas.sasreader import SASReader as SASReader
from pandas.io.stata import StataReader as StataReader

View File

@ -0,0 +1,12 @@
from pandas.core.arrays import (
BooleanArray as BooleanArray,
Categorical as Categorical,
DatetimeArray as DatetimeArray,
IntegerArray as IntegerArray,
IntervalArray as IntervalArray,
PandasArray as PandasArray,
PeriodArray as PeriodArray,
SparseArray as SparseArray,
StringArray as StringArray,
TimedeltaArray as TimedeltaArray,
)

View File

@ -0,0 +1,9 @@
from typing import Callable
from pandas._typing import TypeT
class PandasDelegate: ...
# Stubs: class decorators registering a custom accessor under
# `DataFrame.<name>` / `Series.<name>` / `Index.<name>`.
# Each returns the decorated class unchanged (TypeT -> TypeT).
def register_dataframe_accessor(name: str) -> Callable[[TypeT], TypeT]: ...
def register_series_accessor(name: str) -> Callable[[TypeT], TypeT]: ...
def register_index_accessor(name: str) -> Callable[[TypeT], TypeT]: ...

View File

@ -0,0 +1,80 @@
from typing import (
Literal,
overload,
)
import numpy as np
from pandas import (
Categorical,
CategoricalIndex,
Index,
IntervalIndex,
PeriodIndex,
Series,
)
from pandas.api.extensions import ExtensionArray
from pandas._typing import (
AnyArrayLike,
IntervalT,
TakeIndexer,
np_1darray,
)
# These are type: ignored because the Index types overlap due to inheritance but indices
# with extension types return the same type while standard type return ndarray
# Stub overloads for pandas.unique: extension-typed indexes/arrays round-trip
# to their own type, while plain Index/Series/ndarray inputs yield ndarray
# (or the Series' ExtensionArray). Ordered most-specific first.
@overload
def unique(  # pyright: ignore[reportOverlappingOverload]
    values: PeriodIndex,
) -> PeriodIndex: ...
@overload
def unique(values: CategoricalIndex) -> CategoricalIndex: ...  # type: ignore[overload-overlap]
@overload
def unique(values: IntervalIndex[IntervalT]) -> IntervalIndex[IntervalT]: ...
@overload
def unique(values: Index) -> np.ndarray: ...
@overload
def unique(values: Categorical) -> Categorical: ...
@overload
def unique(values: Series) -> np.ndarray | ExtensionArray: ...
@overload
def unique(values: np.ndarray) -> np.ndarray: ...
@overload
def unique(values: ExtensionArray) -> ExtensionArray: ...
# Stub overloads for pandas.factorize: always returns (codes, uniques);
# the uniques' type tracks the input (ndarray -> ndarray, Index/Series ->
# Index, Categorical -> Categorical).
@overload
def factorize(
    values: np.ndarray,
    sort: bool = ...,
    use_na_sentinel: bool = ...,
    size_hint: int | None = ...,
) -> tuple[np.ndarray, np.ndarray]: ...
@overload
def factorize(
    values: Index | Series,
    sort: bool = ...,
    use_na_sentinel: bool = ...,
    size_hint: int | None = ...,
) -> tuple[np_1darray, Index]: ...
@overload
def factorize(
    values: Categorical,
    sort: bool = ...,
    use_na_sentinel: bool = ...,
    size_hint: int | None = ...,
) -> tuple[np_1darray, Categorical]: ...
# Stub: count distinct values in any array-like; returns a Series of counts
# (or of proportions when normalize=True), optionally binned.
def value_counts(
    values: AnyArrayLike | list | tuple,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins: int | None = None,
    dropna: bool = True,
) -> Series: ...
# Stub: take elements from *arr* along axis 0 or 1; with allow_fill=True,
# -1 positions in *indices* are filled with *fill_value*.
def take(
    arr,
    indices: TakeIndexer,
    axis: Literal[0, 1] = 0,
    allow_fill: bool = False,
    fill_value=None,
): ...

View File

@ -0,0 +1,78 @@
from pandas.core.algorithms import (
factorize as factorize,
unique as unique,
value_counts as value_counts,
)
from pandas.core.arrays import Categorical as Categorical
from pandas.core.arrays.arrow.dtype import ArrowDtype as ArrowDtype
from pandas.core.arrays.boolean import BooleanDtype as BooleanDtype
from pandas.core.arrays.floating import (
Float32Dtype as Float32Dtype,
Float64Dtype as Float64Dtype,
)
from pandas.core.arrays.integer import (
Int8Dtype as Int8Dtype,
Int16Dtype as Int16Dtype,
Int32Dtype as Int32Dtype,
Int64Dtype as Int64Dtype,
UInt8Dtype as UInt8Dtype,
UInt16Dtype as UInt16Dtype,
UInt32Dtype as UInt32Dtype,
UInt64Dtype as UInt64Dtype,
)
from pandas.core.arrays.string_ import StringDtype as StringDtype
from pandas.core.construction import array as array
from pandas.core.frame import DataFrame as DataFrame
from pandas.core.groupby import (
Grouper as Grouper,
NamedAgg as NamedAgg,
)
from pandas.core.indexes.api import (
CategoricalIndex as CategoricalIndex,
DatetimeIndex as DatetimeIndex,
Index as Index,
IntervalIndex as IntervalIndex,
MultiIndex as MultiIndex,
PeriodIndex as PeriodIndex,
RangeIndex as RangeIndex,
TimedeltaIndex as TimedeltaIndex,
)
from pandas.core.indexes.datetimes import (
bdate_range as bdate_range,
date_range as date_range,
)
from pandas.core.indexes.interval import (
Interval as Interval,
interval_range as interval_range,
)
from pandas.core.indexes.period import period_range as period_range
from pandas.core.indexes.timedeltas import timedelta_range as timedelta_range
from pandas.core.indexing import IndexSlice as IndexSlice
from pandas.core.series import Series as Series
from pandas.core.tools.datetimes import to_datetime as to_datetime
from pandas.core.tools.numeric import to_numeric as to_numeric
from pandas.core.tools.timedeltas import to_timedelta as to_timedelta
from pandas._libs import (
NaT as NaT,
Period as Period,
Timedelta as Timedelta,
)
from pandas._libs.missing import NA as NA
from pandas._libs.tslibs import Timestamp as Timestamp
from pandas.core.dtypes.dtypes import (
CategoricalDtype as CategoricalDtype,
DatetimeTZDtype as DatetimeTZDtype,
IntervalDtype as IntervalDtype,
PeriodDtype as PeriodDtype,
)
from pandas.core.dtypes.missing import (
isna as isna,
isnull as isnull,
notna as notna,
notnull as notnull,
)
from pandas.io.formats.format import set_eng_float_format as set_eng_float_format
from pandas.tseries.offsets import DateOffset as DateOffset

View File

@ -0,0 +1,39 @@
from typing import Any
from typing_extensions import Self
# Stub mixin declaring pandas' operator protocol: every comparison, logical,
# and arithmetic dunder returns Self (elementwise results of the same
# container type). The __eq__/__ne__ ignores are needed because object
# declares them as returning bool.
class OpsMixin:
    def __eq__(self, other: object) -> Self: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def __ne__(self, other: object) -> Self: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def __lt__(self, other: Any) -> Self: ...
    def __le__(self, other: Any) -> Self: ...
    def __gt__(self, other: Any) -> Self: ...
    def __ge__(self, other: Any) -> Self: ...
    # -------------------------------------------------------------
    # Logical Methods
    def __and__(self, other: Any) -> Self: ...
    def __rand__(self, other: Any) -> Self: ...
    def __or__(self, other: Any) -> Self: ...
    def __ror__(self, other: Any) -> Self: ...
    def __xor__(self, other: Any) -> Self: ...
    def __rxor__(self, other: Any) -> Self: ...
    # -------------------------------------------------------------
    # Arithmetic Methods
    def __add__(self, other: Any) -> Self: ...
    def __radd__(self, other: Any) -> Self: ...
    def __sub__(self, other: Any) -> Self: ...
    def __rsub__(self, other: Any) -> Self: ...
    def __mul__(self, other: Any) -> Self: ...
    def __rmul__(self, other: Any) -> Self: ...
    # Handled by subclasses that specify only the valid values
    # that can be passed
    # def __truediv__(self, other: Any) -> Self: ...
    # def __rtruediv__(self, other: Any) -> Self: ...
    # def __floordiv__(self, other: Any) -> Self: ...
    # def __rfloordiv__(self, other: Any) -> Self: ...
    def __mod__(self, other: Any) -> Self: ...
    def __rmod__(self, other: Any) -> Self: ...
    def __divmod__(self, other: Any) -> tuple[Self, Self]: ...
    def __rdivmod__(self, other: Any) -> tuple[Self, Self]: ...
    def __pow__(self, other: Any) -> Self: ...
    def __rpow__(self, other: Any) -> Self: ...

View File

@ -0,0 +1,15 @@
from pandas.core.arrays.base import (
ExtensionArray as ExtensionArray,
ExtensionOpsMixin as ExtensionOpsMixin,
ExtensionScalarOpsMixin as ExtensionScalarOpsMixin,
)
from pandas.core.arrays.boolean import BooleanArray as BooleanArray
from pandas.core.arrays.categorical import Categorical as Categorical
from pandas.core.arrays.datetimes import DatetimeArray as DatetimeArray
from pandas.core.arrays.integer import IntegerArray as IntegerArray
from pandas.core.arrays.interval import IntervalArray as IntervalArray
from pandas.core.arrays.numpy_ import PandasArray as PandasArray
from pandas.core.arrays.period import PeriodArray as PeriodArray
from pandas.core.arrays.sparse import SparseArray as SparseArray
from pandas.core.arrays.string_ import StringArray as StringArray
from pandas.core.arrays.timedeltas import TimedeltaArray as TimedeltaArray

View File

@ -0,0 +1,11 @@
import pyarrow as pa
from pandas._libs.missing import NAType
from pandas.core.dtypes.base import StorageExtensionDtype
# Stub: pandas ExtensionDtype backed by a pyarrow DataType; missing values
# are represented by pd.NA.
class ArrowDtype(StorageExtensionDtype):
    pyarrow_dtype: pa.DataType
    def __init__(self, pyarrow_dtype: pa.DataType) -> None: ...
    @property
    def na_value(self) -> NAType: ...

View File

@ -0,0 +1,81 @@
from collections.abc import Iterator
from typing import (
Any,
overload,
)
import numpy as np
from typing_extensions import Self
from pandas._typing import (
ArrayLike,
Scalar,
ScalarIndexer,
SequenceIndexer,
TakeIndexer,
np_1darray,
npt,
)
from pandas.core.dtypes.dtypes import ExtensionDtype as ExtensionDtype
# Stub: abstract base for pandas extension arrays (1-D, dtype-homogeneous,
# NA-aware). Scalar indexing yields Any; sequence indexing yields Self.
class ExtensionArray:
    @overload
    def __getitem__(self, item: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(self, item: SequenceIndexer) -> Self: ...
    def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[Any]: ...
    def __contains__(self, item: object) -> bool | np.bool_: ...
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = ...,
        copy: bool = False,
        na_value: Scalar = ...,
    ) -> np_1darray[Any]: ...
    @property
    def dtype(self) -> ExtensionDtype: ...
    @property
    def shape(self) -> tuple[int, ...]: ...
    @property
    def ndim(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    def astype(self, dtype, copy: bool = True): ...
    def isna(self) -> ArrayLike: ...
    def argsort(
        self, *, ascending: bool = ..., kind: str = ..., **kwargs
    ) -> np_1darray: ...
    def fillna(self, value=..., method=None, limit=None): ...
    def dropna(self): ...
    def shift(self, periods: int = 1, fill_value: object = ...) -> Self: ...
    def unique(self): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    def factorize(self, use_na_sentinel: bool = True) -> tuple[np_1darray, Self]: ...
    def repeat(self, repeats, axis=...): ...
    def take(
        self,
        indexer: TakeIndexer,
        *,
        allow_fill: bool = ...,
        fill_value=...,
    ) -> Self: ...
    def copy(self) -> Self: ...
    def view(self, dtype=...) -> Self | np_1darray: ...
    def ravel(self, order="C") -> Self: ...
    def tolist(self) -> list: ...
    # Reductions (sum/min/...) and accumulations (cumsum/...) dispatched by name.
    def _reduce(
        self, name: str, *, skipna: bool = ..., keepdims: bool = ..., **kwargs
    ) -> object: ...
    def _accumulate(self, name: str, *, skipna: bool = ..., **kwargs) -> Self: ...
# Stub: mixin whose classmethods install arithmetic/comparison/logical
# dunders onto an ExtensionArray subclass at class-creation time.
class ExtensionOpsMixin:
    @classmethod
    def _add_arithmetic_ops(cls) -> None: ...
    @classmethod
    def _add_comparison_ops(cls) -> None: ...
    @classmethod
    def _add_logical_ops(cls) -> None: ...
class ExtensionScalarOpsMixin(ExtensionOpsMixin): ...

View File

@ -0,0 +1,25 @@
import numpy as np
from pandas.core.arrays.masked import BaseMaskedArray as BaseMaskedArray
from pandas._libs.missing import NAType
from pandas._typing import type_t
from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype
# Stub: nullable boolean dtype ("boolean"); NA value is pd.NA and the
# concrete array class is BooleanArray.
class BooleanDtype(ExtensionDtype):
    @property
    def na_value(self) -> NAType: ...
    @classmethod
    def construct_array_type(cls) -> type_t[BooleanArray]: ...
# Stub: masked boolean extension array; constructed from a values ndarray
# plus a parallel boolean NA mask.
class BooleanArray(BaseMaskedArray):
    def __init__(
        self, values: np.ndarray, mask: np.ndarray, copy: bool = ...
    ) -> None: ...
    @property
    def dtype(self): ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    def __setitem__(self, key, value) -> None: ...
    def astype(self, dtype, copy: bool = True): ...
    def any(self, *, skipna: bool = ..., **kwargs): ...
    def all(self, *, skipna: bool = ..., **kwargs): ...

View File

@ -0,0 +1,161 @@
from collections.abc import (
Callable,
Sequence,
)
from typing import (
Any,
overload,
)
import numpy as np
from pandas import Series
from pandas.core.accessor import PandasDelegate as PandasDelegate
from pandas.core.arrays.base import ExtensionArray as ExtensionArray
from pandas.core.base import NoNewAttributesMixin as NoNewAttributesMixin
from pandas.core.indexes.base import Index
from typing_extensions import Self
from pandas._typing import (
ArrayLike,
Dtype,
ListLike,
Ordered,
PositionalIndexerTuple,
Scalar,
ScalarIndexer,
SequenceIndexer,
TakeIndexer,
np_1darray,
)
from pandas.core.dtypes.dtypes import CategoricalDtype as CategoricalDtype
def contains(cat, key, container): ...
# Stub for pandas.Categorical: values drawn from a fixed set of categories,
# stored as integer codes plus a categories Index.
class Categorical(ExtensionArray):
    __array_priority__: int = ...
    def __init__(
        self,
        values: ListLike,
        categories=...,
        ordered: bool | None = ...,
        dtype: CategoricalDtype | None = ...,
        fastpath: bool = ...,
    ) -> None: ...
    @property
    def categories(self): ...
    @property
    def ordered(self) -> Ordered: ...
    @property
    def dtype(self) -> CategoricalDtype: ...
    def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: ...
    # NOTE(review): at runtime `size` is a property on Categorical — confirm
    # whether this should be `@property` as in the other array stubs.
    def size(self) -> int: ...
    def tolist(self) -> list[Scalar]: ...
    # Alias kept for backwards compatibility with older pandas spellings.
    to_list = ...
    # Alternate constructor from pre-computed integer codes.
    @classmethod
    def from_codes(
        cls,
        codes: Sequence[int],
        categories: Index | None = ...,
        ordered: bool | None = ...,
        dtype: CategoricalDtype | None = ...,
        fastpath: bool = ...,
    ) -> Categorical: ...
    @property
    def codes(self) -> np_1darray[np.signedinteger]: ...
    def set_ordered(self, value) -> Categorical: ...
    def as_ordered(self) -> Categorical: ...
    def as_unordered(self) -> Categorical: ...
    def set_categories(
        self,
        new_categories,
        ordered: bool | None = False,
        rename: bool = False,
    ) -> Categorical: ...
    def rename_categories(self, new_categories) -> Categorical: ...
    def reorder_categories(
        self, new_categories, ordered: bool | None = ...
    ) -> Categorical: ...
    def add_categories(self, new_categories) -> Categorical: ...
    def remove_categories(self, removals) -> Categorical: ...
    def remove_unused_categories(self) -> Categorical: ...
    def map(self, mapper): ...
    # NOTE(review): runtime Categorical comparisons are elementwise and return
    # boolean array-likes; the scalar `-> bool` annotations below look
    # suspect — verify against upstream pandas-stubs.
    def __eq__(self, other) -> bool: ...
    def __ne__(self, other) -> bool: ...
    def __lt__(self, other) -> bool: ...
    def __gt__(self, other) -> bool: ...
    def __le__(self, other) -> bool: ...
    def __ge__(self, other) -> bool: ...
    @property
    def shape(self): ...
    def shift(self, periods=1, fill_value=...): ...
    def __array__(self, dtype=...) -> np_1darray: ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    @property
    def T(self): ...
    @property
    def nbytes(self) -> int: ...
    def memory_usage(self, deep: bool = ...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    def isna(self) -> np_1darray[np.bool]: ...
    def isnull(self) -> np_1darray[np.bool]: ...
    def notna(self) -> np_1darray[np.bool]: ...
    def notnull(self) -> np_1darray[np.bool]: ...
    def dropna(self): ...
    def value_counts(self, dropna: bool = True): ...
    def check_for_ordered(self, op) -> None: ...
    def argsort(self, *, ascending: bool = ..., kind: str = ..., **kwargs): ...
    def sort_values(
        self, *, inplace: bool = ..., ascending: bool = ..., na_position: str = ...
    ): ...
    def view(self, dtype=...): ...
    def fillna(self, value=..., method=None, limit=None): ...
    def take(
        self, indexer: TakeIndexer, *, allow_fill: bool = ..., fill_value=...
    ) -> Categorical: ...
    def __len__(self) -> int: ...
    def __iter__(self): ...
    def __contains__(self, key) -> bool: ...
    @overload
    def __getitem__(self, key: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | PositionalIndexerTuple,
    ) -> Self: ...
    def __setitem__(self, key, value) -> None: ...
    def min(self, *, skipna: bool = ...): ...
    def max(self, *, skipna: bool = ...): ...
    def unique(self): ...
    def equals(self, other): ...
    def describe(self): ...
    def repeat(self, repeats, axis=...): ...
    def isin(self, values): ...
# Stub for the Series `.cat` accessor: delegates category manipulation to the
# underlying Categorical, returning new Series rather than mutating.
class CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):
    def __init__(self, data) -> None: ...
    @property
    def codes(self) -> Series[int]: ...
    @property
    def categories(self) -> Index: ...
    @property
    def ordered(self) -> bool | None: ...
    def rename_categories(
        self, new_categories: ListLike | dict[Any, Any] | Callable[[Any], Any]
    ) -> Series: ...
    def reorder_categories(
        self,
        new_categories: ListLike,
        ordered: bool = ...,
    ) -> Series: ...
    def add_categories(self, new_categories: Scalar | ListLike) -> Series: ...
    def remove_categories(self, removals: Scalar | ListLike) -> Series: ...
    def remove_unused_categories(self) -> Series: ...
    def set_categories(
        self,
        new_categories: ListLike,
        ordered: bool | None = False,
        rename: bool = False,
    ) -> Series: ...
    def as_ordered(self) -> Series: ...
    def as_unordered(self) -> Series: ...

View File

@ -0,0 +1,114 @@
from collections.abc import Sequence
from typing import overload
import numpy as np
from pandas.core.arrays.base import (
ExtensionArray,
ExtensionOpsMixin,
)
from typing_extensions import (
Self,
TypeAlias,
)
from pandas._libs import (
NaT as NaT,
NaTType as NaTType,
)
from pandas._typing import (
DatetimeLikeScalar,
PositionalIndexerTuple,
ScalarIndexer,
SequenceIndexer,
TimeAmbiguous,
TimeNonexistent,
TimeUnit,
)
DTScalarOrNaT: TypeAlias = DatetimeLikeScalar | NaTType
# Stub: mixin adding date-formatting operations to datetime-like arrays.
class DatelikeOps:
    def strftime(self, date_format): ...
# Stub: mixin adding resolution ("s"/"ms"/"us"/"ns") handling and
# frequency-based rounding to datetime/timedelta-like arrays.
class TimelikeOps:
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit) -> Self: ...
    # round/floor/ceil snap values to a frequency; ambiguous/nonexistent
    # control DST-edge behavior for tz-aware data.
    def round(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ): ...
    def floor(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ): ...
    def ceil(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ): ...
# Stub: shared base of DatetimeArray/TimedeltaArray/PeriodArray. Scalar
# indexing yields a datetime-like scalar or NaT; `asi8` exposes the
# underlying int64 representation.
class DatetimeLikeArrayMixin(ExtensionOpsMixin, ExtensionArray):
    @property
    def ndim(self) -> int: ...
    @property
    def shape(self): ...
    def reshape(self, *args, **kwargs): ...
    def ravel(self, *args, **kwargs): ...  # pyrefly: ignore
    def __iter__(self): ...
    @property
    def asi8(self) -> np.ndarray: ...
    @property
    def nbytes(self): ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    @property
    def size(self) -> int: ...
    def __len__(self) -> int: ...
    @overload
    def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT: ...
    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | PositionalIndexerTuple,
    ) -> Self: ...
    def __setitem__(  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
        self, key: int | Sequence[int] | Sequence[bool] | slice, value
    ) -> None: ...
    def astype(self, dtype, copy: bool = True): ...
    def view(self, dtype=...): ...
    def unique(self): ...
    def copy(self): ...
    def shift(self, periods: int = 1, fill_value=..., axis: int = ...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    def repeat(self, repeats, *args, **kwargs): ...  # pyrefly: ignore
    def value_counts(self, dropna: bool = True): ...
    def map(self, mapper): ...
    def isna(self): ...
    def fillna(self, value=..., method=None, limit=None): ...
    # Frequency metadata (settable) plus derived read-only views of it.
    @property
    def freq(self): ...
    @freq.setter
    def freq(self, value) -> None: ...
    @property
    def freqstr(self): ...
    @property
    def inferred_freq(self): ...
    @property
    def resolution(self): ...
    # Assigned (not defined) here; concrete behavior comes from subclasses.
    __pow__ = ...
    __rpow__ = ...
    __rmul__ = ...
    def __add__(self, other): ...
    def __radd__(self, other): ...
    def __sub__(self, other): ...
    def __rsub__(self, other): ...
    def __iadd__(self, other): ...
    def __isub__(self, other): ...
    def min(self, *, axis=..., skipna: bool = ..., **kwargs): ...
    def max(self, *, axis=..., skipna: bool = ..., **kwargs): ...
    def mean(self, *, skipna: bool = ...): ...

View File

@ -0,0 +1,85 @@
from datetime import tzinfo as _tzinfo
import numpy as np
from pandas.core.arrays.datetimelike import (
DatelikeOps,
DatetimeLikeArrayMixin,
TimelikeOps,
)
from pandas._typing import (
TimeAmbiguous,
TimeNonexistent,
TimeZones,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtype
# Stub: tz-naive or tz-aware datetime extension array; dtype is a plain
# datetime64 np.dtype or DatetimeTZDtype.
class DatetimeArray(DatetimeLikeArrayMixin, TimelikeOps, DatelikeOps):
    __array_priority__: int = ...
    def __init__(self, values, dtype=..., freq=..., copy: bool = ...) -> None: ...
    # ignore in dtype() is from the pandas source
    @property
    def dtype(self) -> np.dtype | DatetimeTZDtype: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    @property
    def tz(self): ...
    @tz.setter
    def tz(self, value) -> None: ...
    @property
    def tzinfo(self) -> _tzinfo | None: ...
    @property
    def is_normalized(self): ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __iter__(self): ...
    def astype(self, dtype, copy: bool = True): ...
    # Timezone conversion vs. localization of naive timestamps.
    def tz_convert(self, tz: TimeZones): ...
    def tz_localize(
        self,
        tz: TimeZones,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ): ...
    def to_pydatetime(self): ...
    def normalize(self): ...
    def to_period(self, freq=...): ...
    def to_perioddelta(self, freq): ...
    def month_name(self, locale=...): ...
    def day_name(self, locale=...): ...
    @property
    def time(self): ...
    @property
    def timetz(self): ...
    @property
    def date(self): ...
    # Datetime field accessors, assigned dynamically in the implementation.
    year = ...
    month = ...
    day = ...
    hour = ...
    minute = ...
    second = ...
    microsecond = ...
    nanosecond = ...
    dayofweek = ...
    weekday = ...
    dayofyear = ...
    quarter = ...
    days_in_month = ...
    daysinmonth = ...
    is_month_start = ...
    is_month_end = ...
    is_quarter_start = ...
    is_quarter_end = ...
    is_year_start = ...
    is_year_end = ...
    is_leap_year = ...
    def to_julian_date(self): ...
# Stub: convert an object array to datetime64[ns]; `errors` selects
# raise/coerce behavior and `allow_object` permits an object-dtype fallback.
def objects_to_datetime64ns(
    data,
    dayfirst,
    yearfirst,
    utc: bool = ...,
    errors: str = ...,
    require_iso8601: bool = ...,
    allow_object: bool = ...,
): ...

View File

@ -0,0 +1,4 @@
from pandas.core.arrays.numeric import NumericDtype
# Stubs: nullable floating-point dtypes ("Float32"/"Float64").
class Float32Dtype(NumericDtype): ...
class Float64Dtype(NumericDtype): ...

View File

@ -0,0 +1,31 @@
from pandas.core.arrays.masked import BaseMaskedArray
from pandas._libs.missing import NAType
from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype
# Stub: private base for the nullable integer dtypes; NA value is pd.NA and
# the concrete array class is IntegerArray.
class _IntegerDtype(ExtensionDtype):
    base: None
    @property
    def na_value(self) -> NAType: ...
    @property
    def itemsize(self) -> int: ...
    @classmethod
    def construct_array_type(cls) -> type[IntegerArray]: ...
# Stub: masked integer extension array (values ndarray + boolean NA mask).
class IntegerArray(BaseMaskedArray):
    @property
    def dtype(self) -> _IntegerDtype: ...
    def __init__(self, values, mask, copy: bool = ...) -> None: ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    def __setitem__(self, key, value) -> None: ...
    def astype(self, dtype, copy: bool = True): ...
# Stubs: the concrete nullable integer dtypes ("Int8" … "UInt64").
class Int8Dtype(_IntegerDtype): ...
class Int16Dtype(_IntegerDtype): ...
class Int32Dtype(_IntegerDtype): ...
class Int64Dtype(_IntegerDtype): ...
class UInt8Dtype(_IntegerDtype): ...
class UInt16Dtype(_IntegerDtype): ...
class UInt32Dtype(_IntegerDtype): ...
class UInt64Dtype(_IntegerDtype): ...

View File

@ -0,0 +1,112 @@
from typing import overload
import numpy as np
from pandas import (
Index,
Series,
)
from pandas.core.arrays.base import ExtensionArray as ExtensionArray
from typing_extensions import (
Self,
TypeAlias,
)
from pandas._libs.interval import (
Interval as Interval,
IntervalMixin as IntervalMixin,
)
from pandas._typing import (
Axis,
Scalar,
ScalarIndexer,
SequenceIndexer,
TakeIndexer,
np_1darray,
)
IntervalOrNA: TypeAlias = Interval | float
# Stub: extension array of Interval objects; scalar indexing yields an
# Interval or NaN (float).
class IntervalArray(IntervalMixin, ExtensionArray):
    can_hold_na: bool = ...
    def __new__(
        cls, data, closed=..., dtype=..., copy: bool = ..., verify_integrity: bool = ...
    ): ...
    # Alternate constructors from breakpoints, left/right edges, or tuples.
    @classmethod
    def from_breaks(
        cls,
        breaks,
        closed: str = "right",
        copy: bool = False,
        dtype=None,
    ): ...
    @classmethod
    def from_arrays(
        cls,
        left,
        right,
        closed: str = "right",
        copy: bool = False,
        dtype=...,
    ): ...
    @classmethod
    def from_tuples(
        cls,
        data,
        closed: str = "right",
        copy: bool = False,
        dtype=None,
    ): ...
    def __iter__(self): ...
    def __len__(self) -> int: ...
    @overload
    def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: ...
    @overload
    def __getitem__(self, key: SequenceIndexer) -> Self: ...
    def __setitem__(self, key, value) -> None: ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def fillna(self, value=..., method=None, limit=None): ...
    @property
    def dtype(self): ...
    def astype(self, dtype, copy: bool = True): ...
    def copy(self): ...
    def isna(self): ...
    @property
    def nbytes(self) -> int: ...
    @property
    def size(self) -> int: ...
    def shift(self, periods: int = 1, fill_value: object = ...) -> IntervalArray: ...
    def take(  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
        self: Self,
        indices: TakeIndexer,
        *,
        allow_fill: bool = ...,
        fill_value=...,
        axis=...,
        **kwargs,
    ) -> Self: ...
    def value_counts(self, dropna: bool = True): ...
    @property
    def left(self) -> Index: ...
    @property
    def right(self) -> Index: ...
    # NOTE(review): at runtime `closed` is the string "left"/"right"/"both"/
    # "neither" (the constructors above take it as str) — the `-> bool`
    # annotation looks wrong; verify against upstream pandas-stubs.
    @property
    def closed(self) -> bool: ...
    def set_closed(self, closed): ...
    @property
    def length(self) -> Index: ...
    @property
    def mid(self) -> Index: ...
    @property
    def is_non_overlapping_monotonic(self) -> bool: ...
    def __array__(self, dtype=...) -> np_1darray: ...
    def __arrow_array__(self, type=...): ...
    def to_tuples(self, na_tuple: bool = True): ...
    def repeat(self, repeats, axis: Axis | None = ...): ...
    # Elementwise membership: Series input yields a boolean Series,
    # anything else yields a boolean ndarray.
    @overload
    def contains(self, other: Series) -> Series[bool]: ...
    @overload
    def contains(
        self, other: Scalar | ExtensionArray | Index | np.ndarray
    ) -> np_1darray[np.bool]: ...
    def overlaps(self, other: Interval) -> bool: ...

View File

@ -0,0 +1,41 @@
from typing import (
Any,
overload,
)
import numpy as np
from pandas.core.arrays import (
ExtensionArray as ExtensionArray,
ExtensionOpsMixin,
)
from typing_extensions import Self
from pandas._typing import (
Scalar,
ScalarIndexer,
SequenceIndexer,
npt,
)
# Stub: base for the masked (nullable) extension arrays — a values ndarray
# paired with a boolean NA mask.
class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin):
    @overload
    def __getitem__(self, item: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(self, item: SequenceIndexer) -> Self: ...
    def __iter__(self): ...
    def __len__(self) -> int: ...
    def __invert__(self): ...
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = ...,
        copy: bool = False,
        na_value: Scalar = ...,
    ) -> np.ndarray: ...
    __array_priority__: int = ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __arrow_array__(self, type=...): ...
    def isna(self): ...
    @property
    def nbytes(self) -> int: ...
    def copy(self): ...
    def value_counts(self, dropna: bool = True): ...

View File

@ -0,0 +1,3 @@
from pandas.core.dtypes.dtypes import BaseMaskedDtype
class NumericDtype(BaseMaskedDtype): ...

View File

@ -0,0 +1,17 @@
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas.core.arrays.base import (
ExtensionArray,
ExtensionOpsMixin,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
# Stub: ExtensionDtype wrapper around a plain numpy dtype.
class PandasDtype(ExtensionDtype):
    @property
    def numpy_dtype(self) -> np.dtype: ...
    @property
    def itemsize(self) -> int: ...
# Stub: ExtensionArray backed by a plain numpy ndarray; numpy-operator
# interop comes from NDArrayOperatorsMixin.
class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...

View File

@ -0,0 +1,42 @@
import numpy as np
from pandas import PeriodDtype
from pandas.core.arrays.datetimelike import (
DatelikeOps,
DatetimeLikeArrayMixin,
)
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.period import Period
class PeriodArray(DatetimeLikeArrayMixin, DatelikeOps):
    """Stub for the ExtensionArray holding Period values with a common freq."""

    __array_priority__: int = ...
    def __init__(self, values, freq=..., dtype=..., copy: bool = ...) -> None: ...
    @property
    def dtype(self) -> PeriodDtype: ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __arrow_array__(self, type=...): ...
    # NOTE(review): these datetime-field accessors are annotated as scalar
    # ints on an array class; at runtime they presumably yield element-wise
    # values — confirm against pandas before relying on the `int` annotations.
    year: int = ...
    month: int = ...
    day: int = ...
    hour: int = ...
    minute: int = ...
    second: int = ...
    weekofyear: int = ...
    week: int = ...
    dayofweek: int = ...
    weekday: int = ...
    dayofyear: int = ...
    day_of_year = ...
    quarter: int = ...
    qyear: int = ...
    days_in_month: int = ...
    daysinmonth: int = ...
    @property
    def is_leap_year(self) -> bool: ...
    @property
    def start_time(self) -> Timestamp: ...
    @property
    def end_time(self) -> Timestamp: ...
    # NOTE(review): scalar return types (Timestamp/Period) on array-valued
    # conversions look suspicious — verify against the runtime behavior.
    def to_timestamp(self, freq: str | None = ..., how: str = ...) -> Timestamp: ...
    def asfreq(self, freq: str | None = ..., how: str = "E") -> Period: ...
    def astype(self, dtype, copy: bool = True): ...

View File

@ -0,0 +1,6 @@
from pandas.core.arrays.sparse.accessor import (
SparseAccessor as SparseAccessor,
SparseFrameAccessor as SparseFrameAccessor,
)
from pandas.core.arrays.sparse.array import SparseArray as SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype as SparseDtype

View File

@ -0,0 +1,19 @@
from pandas import Series
from pandas.core.accessor import PandasDelegate
class BaseAccessor:
    """Shared base for the `.sparse` accessors; stores the wrapped object."""

    def __init__(self, data=...) -> None: ...

class SparseAccessor(BaseAccessor, PandasDelegate):
    """Stub for the `Series.sparse` accessor."""

    # Build a sparse Series from a scipy.sparse COO matrix.
    @classmethod
    def from_coo(cls, A, dense_index: bool = False) -> Series: ...
    def to_coo(self, row_levels=..., column_levels=..., sort_labels: bool = False): ...
    def to_dense(self): ...

class SparseFrameAccessor(BaseAccessor, PandasDelegate):
    """Stub for the `DataFrame.sparse` accessor."""

    @classmethod
    def from_spmatrix(cls, data, index=..., columns=...): ...
    def to_dense(self): ...
    def to_coo(self): ...
    # Fraction of entries that are stored (non-fill) values.
    @property
    def density(self) -> float: ...

View File

@ -0,0 +1,82 @@
from enum import Enum
from typing import (
Any,
final,
overload,
)
import numpy as np
from pandas.core.arrays import (
ExtensionArray,
ExtensionOpsMixin,
)
from typing_extensions import Self
from pandas._typing import (
ScalarIndexer,
SequenceIndexer,
)
# Typing helper so literal `...` (Ellipsis) can appear in the __getitem__
# signature below; it is not a runtime class.
@final
class ellipsis(Enum):
    Ellipsis = "..."

class SparseArray(ExtensionArray, ExtensionOpsMixin):
    """Stub for the sparse ExtensionArray (stored values + fill_value + index)."""

    def __init__(
        self,
        data,
        sparse_index=...,
        fill_value=...,
        kind: str = ...,
        dtype=...,
        copy: bool = ...,
    ) -> None: ...
    @classmethod
    def from_spmatrix(cls, data): ...
    def __array__(self, dtype=..., copy=...) -> np.ndarray: ...
    def __setitem__(self, key, value) -> None: ...
    # Internal sparse index structure (block/integer kind).
    @property
    def sp_index(self): ...
    # Only the explicitly stored (non-fill) values.
    @property
    def sp_values(self): ...
    @property
    def dtype(self): ...
    @property
    def fill_value(self): ...
    @fill_value.setter
    def fill_value(self, value) -> None: ...
    @property
    def kind(self) -> str: ...
    def __len__(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    @property
    def density(self): ...
    # Number of explicitly stored points.
    @property
    def npoints(self) -> int: ...
    def isna(self): ...
    def fillna(self, value=..., method=..., limit=...): ...
    def shift(self, periods: int = 1, fill_value=...): ...
    def unique(self): ...
    def value_counts(self, dropna: bool = True): ...
    @overload
    def __getitem__(self, key: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | tuple[int | ellipsis, ...],
    ) -> Self: ...
    def copy(self): ...
    def astype(self, dtype=..., copy: bool = True): ...
    def map(self, mapper): ...
    def to_dense(self): ...
    def nonzero(self): ...
    def all(self, axis=..., *args, **kwargs): ...
    def any(self, axis: int = ..., *args, **kwargs): ...
    def sum(self, axis: int = 0, *args, **kwargs): ...
    def cumsum(self, axis: int = ..., *args, **kwargs): ...
    def mean(self, axis: int = ..., *args, **kwargs): ...
    @property
    def T(self): ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    def __abs__(self): ...

View File

@ -0,0 +1,17 @@
from pandas._typing import (
Dtype,
Scalar,
npt,
)
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.dtypes import (
register_extension_dtype as register_extension_dtype,
)
class SparseDtype(ExtensionDtype):
    """Stub for the dtype of SparseArray: an underlying dtype plus fill value."""

    def __init__(
        self, dtype: Dtype | npt.DTypeLike = ..., fill_value: Scalar | None = ...
    ) -> None: ...
    # The value treated as "not stored" in the sparse representation.
    @property
    def fill_value(self) -> Scalar | None: ...

View File

@ -0,0 +1,20 @@
from typing import Literal
from pandas.core.arrays import PandasArray
from pandas._libs.missing import NAType
from pandas.core.dtypes.base import ExtensionDtype
class StringDtype(ExtensionDtype):
    """Stub for the dedicated string dtype; storage selects the backend."""

    def __init__(self, storage: Literal["python", "pyarrow"] | None = None) -> None: ...
    # Missing values of this dtype are pd.NA.
    @property
    def na_value(self) -> NAType: ...

class StringArray(PandasArray):
    """Stub for the python-storage string ExtensionArray."""

    def __init__(self, values, copy: bool = ...) -> None: ...
    def __arrow_array__(self, type=...): ...
    def __setitem__(self, key, value) -> None: ...
    def fillna(self, value=..., method=None, limit=None): ...
    def astype(self, dtype, copy: bool = True): ...
    def value_counts(self, dropna: bool = True): ...

View File

@ -0,0 +1,65 @@
from collections.abc import Sequence
from datetime import timedelta
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin,
TimelikeOps,
)
class TimedeltaArray(DatetimeLikeArrayMixin, TimelikeOps):
    """Stub for the ExtensionArray of timedelta64 values."""

    __array_priority__: int = ...
    @property
    def dtype(self): ...
    def __init__(self, values, dtype=..., freq=..., copy: bool = ...) -> None: ...
    def astype(self, dtype, copy: bool = True): ...
    # The reductions below mirror the NumPy keyword signatures plus the
    # pandas-specific skipna/min_count options.
    def sum(
        self,
        *,
        axis=...,
        dtype=...,
        out=...,
        keepdims: bool = ...,
        initial=...,
        skipna: bool = ...,
        min_count: int = ...,
    ): ...
    def std(
        self,
        *,
        axis=...,
        dtype=...,
        out=...,
        ddof: int = ...,
        keepdims: bool = ...,
        skipna: bool = ...,
    ): ...
    def median(
        self,
        *,
        axis=...,
        out=...,
        overwrite_input: bool = ...,
        keepdims: bool = ...,
        skipna: bool = ...,
    ): ...
    def __mul__(self, other): ...
    __rmul__ = ...
    def __truediv__(self, other): ...
    def __rtruediv__(self, other): ...
    def __floordiv__(self, other): ...
    def __rfloordiv__(self, other): ...
    def __mod__(self, other): ...
    def __rmod__(self, other): ...
    def __divmod__(self, other): ...
    def __rdivmod__(self, other): ...
    def __neg__(self): ...
    def __pos__(self): ...
    def __abs__(self): ...
    # NOTE(review): on an array, total_seconds() presumably yields a float
    # array, not a scalar int — verify this annotation against pandas.
    def total_seconds(self) -> int: ...
    def to_pytimedelta(self) -> Sequence[timedelta]: ...
    # NOTE(review): element-wise field accessors annotated as scalars; and
    # `components` is documented upstream as returning a DataFrame — confirm.
    days: int = ...
    seconds: int = ...
    microseconds: int = ...
    nanoseconds: int = ...
    @property
    def components(self) -> int: ...

View File

@ -0,0 +1,142 @@
from collections.abc import (
Hashable,
Iterator,
)
from typing import (
Any,
Generic,
Literal,
final,
overload,
)
import numpy as np
from pandas import (
Index,
Series,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import Categorical
from typing_extensions import Self
from pandas._typing import (
S1,
AxisIndex,
DropKeep,
DTypeLike,
GenericT,
GenericT_co,
NDFrameT,
Scalar,
SupportsDType,
np_1darray,
)
from pandas.util._decorators import cache_readonly
class NoNewAttributesMixin:
    """Stub for the mixin that blocks setting new attributes after freeze."""

    def __setattr__(self, key: str, value: Any) -> None: ...

class SelectionMixin(Generic[NDFrameT]):
    """Stub for the mixin providing column selection for groupby-like objects."""

    # The object the selection applies to (DataFrame or Series).
    obj: NDFrameT
    # Labels excluded from the selection (e.g. grouping keys).
    exclusions: frozenset[Hashable]
    @final
    @cache_readonly
    def ndim(self) -> int: ...
    def __getitem__(self, key): ...
    def aggregate(self, func, *args, **kwargs): ...
class IndexOpsMixin(OpsMixin, Generic[S1, GenericT_co]):
    """Stub for ops shared by Index and Series.

    S1 is the element type; GenericT_co is the NumPy scalar type backing
    the object, used to type to_numpy()/factorize() results.
    """

    __array_priority__: int = ...
    @property
    def T(self) -> Self: ...
    @property
    def shape(self) -> tuple: ...
    @property
    def ndim(self) -> int: ...
    # Extract the single element; raises if there is not exactly one.
    def item(self) -> S1: ...
    @property
    def nbytes(self) -> int: ...
    @property
    def size(self) -> int: ...
    @property
    def array(self) -> ExtensionArray: ...
    # Overloads: no dtype -> backing scalar type is preserved; a concrete
    # NumPy dtype narrows the result; an arbitrary DTypeLike loses precision.
    @overload
    def to_numpy(
        self,
        dtype: None = None,
        copy: bool = False,
        na_value: Scalar = ...,
        **kwargs,
    ) -> np_1darray[GenericT_co]: ...
    @overload
    def to_numpy(
        self,
        dtype: np.dtype[GenericT] | SupportsDType[GenericT] | type[GenericT],
        copy: bool = False,
        na_value: Scalar = ...,
        **kwargs,
    ) -> np_1darray[GenericT]: ...
    @overload
    def to_numpy(
        self,
        dtype: DTypeLike,
        copy: bool = False,
        na_value: Scalar = ...,
        **kwargs,
    ) -> np_1darray: ...
    @property
    def empty(self) -> bool: ...
    def max(self, axis=..., skipna: bool = ..., **kwargs): ...
    def min(self, axis=..., skipna: bool = ..., **kwargs): ...
    def argmax(
        self,
        axis: AxisIndex | None = ...,
        skipna: bool = True,
        *args,
        **kwargs,
    ) -> np.int64: ...
    def argmin(
        self,
        axis: AxisIndex | None = ...,
        skipna: bool = True,
        *args,
        **kwargs,
    ) -> np.int64: ...
    def tolist(self) -> list[S1]: ...
    def to_list(self) -> list[S1]: ...
    def __iter__(self) -> Iterator[S1]: ...
    @property
    def hasnans(self) -> bool: ...
    # normalize=False -> integer counts; normalize=True -> float frequencies.
    @overload
    def value_counts(
        self,
        normalize: Literal[False] = ...,
        sort: bool = ...,
        ascending: bool = ...,
        bins=...,
        dropna: bool = ...,
    ) -> Series[int]: ...
    @overload
    def value_counts(
        self,
        normalize: Literal[True],
        sort: bool = ...,
        ascending: bool = ...,
        bins=...,
        dropna: bool = ...,
    ) -> Series[float]: ...
    def nunique(self, dropna: bool = True) -> int: ...
    @property
    def is_unique(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    # Returns (codes, uniques); uniques' type depends on the caller.
    def factorize(
        self, sort: bool = False, use_na_sentinel: bool = True
    ) -> tuple[np_1darray, np_1darray | Index | Categorical]: ...
    def searchsorted(
        self, value, side: Literal["left", "right"] = ..., sorter=...
    ) -> int | list[int]: ...
    def drop_duplicates(self, *, keep: DropKeep = ...) -> Self: ...

View File

@ -0,0 +1 @@
from pandas.core.computation.eval import eval as eval

View File

@ -0,0 +1,17 @@
import abc
class AbstractEngine(metaclass=abc.ABCMeta):
    """Stub for the base class of pd.eval computation engines."""

    # Whether the engine mishandles negative fractional powers and needs a
    # workaround (True for numexpr).
    has_neg_frac: bool = ...
    expr = ...
    aligned_axes = ...
    result_type = ...
    def __init__(self, expr) -> None: ...
    # Convert the parsed expression to the engine's string form.
    def convert(self) -> str: ...
    def evaluate(self) -> object: ...

class NumExprEngine(AbstractEngine):
    """Engine that delegates evaluation to numexpr."""

    has_neg_frac: bool = ...

class PythonEngine(AbstractEngine):
    """Engine that evaluates the expression in plain Python."""

    has_neg_frac: bool = ...
    def evaluate(self): ...

View File

@ -0,0 +1,28 @@
from collections.abc import Mapping
from typing import (
Any,
Literal,
)
from pandas import (
DataFrame,
Series,
)
from pandas.core.computation.ops import BinOp
from pandas._typing import (
Scalar,
npt,
)
def eval(
    expr: str | BinOp,
    parser: Literal["pandas", "python"] = "pandas",
    engine: Literal["python", "numexpr"] | None = ...,
    local_dict: dict[str, Any] | None = None,
    global_dict: dict[str, Any] | None = None,
    resolvers: list[Mapping] | None = ...,
    level: int = 0,
    target: object | None = None,
    inplace: bool = False,
) -> npt.NDArray | Scalar | DataFrame | Series | None: ...
    # Stub for pandas.eval: evaluates a string expression over pandas
    # objects; returns None when assigning in place to `target`.

View File

@ -0,0 +1,64 @@
import ast
from pandas.core.computation.ops import Term as Term
from pandas.core.computation.scope import Scope as Scope
class BaseExprVisitor(ast.NodeVisitor):
    """Stub for the AST visitor that translates pd.eval expressions into Terms/Ops."""

    # Node/term factory types and operator lookup tables used during parsing.
    const_type = ...
    term_type = ...
    binary_ops = ...
    binary_op_nodes = ...
    binary_op_nodes_map = ...
    unary_ops = ...
    unary_op_nodes = ...
    unary_op_nodes_map = ...
    rewrite_map = ...
    env = ...
    engine = ...
    parser = ...
    preparser = ...
    # Name being assigned to when the expression contains an `=`.
    assigner = ...
    def __init__(self, env, engine, parser, preparser=...) -> None: ...
    def visit(self, node, **kwargs): ...
    def visit_Module(self, node, **kwargs): ...
    def visit_Expr(self, node, **kwargs): ...
    def visit_BinOp(self, node, **kwargs): ...
    def visit_Div(self, node, **kwargs): ...
    def visit_UnaryOp(self, node, **kwargs): ...
    def visit_Name(self, node, **kwargs): ...
    def visit_NameConstant(self, node, **kwargs): ...
    def visit_Num(self, node, **kwargs): ...
    def visit_Constant(self, node, **kwargs): ...
    def visit_Str(self, node, **kwargs): ...
    def visit_List(self, node, **kwargs): ...
    def visit_Index(self, node, **kwargs): ...
    def visit_Subscript(self, node, **kwargs): ...
    def visit_Slice(self, node, **kwargs): ...
    def visit_Assign(self, node, **kwargs): ...
    def visit_Attribute(self, node, **kwargs): ...
    def visit_Call(self, node, side=..., **kwargs): ...
    # Rewrites `in`/`not in` into engine-appropriate operations.
    def translate_In(self, op): ...
    def visit_Compare(self, node, **kwargs): ...
    def visit_BoolOp(self, node, **kwargs): ...
class Expr:
    """Stub for a parsed pd.eval expression bound to a Scope."""

    env: Scope
    engine: str
    parser: str
    # Original expression string and the parsed term tree.
    expr = ...
    terms = ...
    def __init__(
        self,
        expr,
        engine: str = ...,
        parser: str = ...,
        env: Scope | None = ...,
        level: int = ...,
    ) -> None: ...
    @property
    def assigner(self): ...
    def __call__(self): ...
    def __len__(self) -> int: ...
    def parse(self): ...
    # Names referenced by the expression.
    @property
    def names(self): ...

View File

@ -0,0 +1,88 @@
import numpy as np
class UndefinedVariableError(NameError):
    """Raised when an expression references a name not found in scope."""

    def __init__(self, name, is_local: bool = ...) -> None: ...

class Term:
    """Stub for a leaf of the expression tree: a named value resolved from scope."""

    def __new__(cls, name, env, side=..., encoding=...): ...
    # True when the name came from the caller's local scope (`@name` syntax).
    is_local: bool
    env = ...
    side = ...
    encoding = ...
    def __init__(self, name, env, side=..., encoding=...) -> None: ...
    @property
    def local_name(self) -> str: ...
    def __call__(self, *args, **kwargs): ...
    def evaluate(self, *args, **kwargs): ...
    def update(self, value) -> None: ...
    @property
    def is_scalar(self) -> bool: ...
    @property
    def type(self): ...
    return_type = ...
    @property
    def raw(self) -> str: ...
    @property
    def is_datetime(self) -> bool: ...
    @property
    def value(self): ...
    @value.setter
    def value(self, new_value) -> None: ...
    @property
    def name(self): ...
    @property
    def ndim(self) -> int: ...

class Constant(Term):
    """A Term whose value is a literal constant."""

    @property
    def name(self): ...

class Op:
    """Stub for an operator node combining one or more operand terms."""

    op: str
    operands = ...
    encoding = ...
    def __init__(self, op: str, operands, *args, **kwargs) -> None: ...
    def __iter__(self): ...
    @property
    def return_type(self): ...
    @property
    def has_invalid_return_type(self) -> bool: ...
    @property
    def operand_types(self): ...
    @property
    def is_scalar(self) -> bool: ...
    @property
    def is_datetime(self) -> bool: ...

class BinOp(Op):
    """Binary operator node (lhs <op> rhs)."""

    lhs = ...
    rhs = ...
    func = ...
    def __init__(self, op: str, lhs, rhs, **kwargs) -> None: ...
    def __call__(self, env): ...
    def evaluate(self, env, engine: str, parser, term_type, eval_in_python): ...
    def convert_values(self): ...

# Module-level predicate: is `dtype` a numeric dtype?
def isnumeric(dtype) -> bool: ...

class Div(BinOp):
    """Division node; handled specially for true division semantics."""

    def __init__(self, lhs, rhs, **kwargs) -> None: ...

class UnaryOp(Op):
    """Unary operator node (e.g. -x, ~x, not x)."""

    operand = ...
    func = ...
    def __init__(self, op: str, operand) -> None: ...
    def __call__(self, env): ...
    @property
    def return_type(self) -> np.dtype: ...

class MathCall(Op):
    """Call of a supported math function (sin, log, ...) inside an expression."""

    func = ...
    def __init__(self, func, args) -> None: ...
    def __call__(self, env): ...

class FuncNode:
    """Wrapper resolving a math-function name to its NumPy implementation."""

    name = ...
    func = ...
    def __init__(self, name: str) -> None: ...
    def __call__(self, *args): ...

View File

@ -0,0 +1,108 @@
from typing import Any
from pandas.core.computation import (
expr as expr,
ops as ops,
scope as _scope,
)
from pandas.core.computation.expr import BaseExprVisitor as BaseExprVisitor
from pandas.core.indexes.base import Index
class PyTablesScope(_scope.Scope):
    """Scope variant that also tracks the queryable columns of an HDF table."""

    queryables: dict[str, Any]
    def __init__(
        self,
        level: int,
        global_dict=...,
        local_dict=...,
        queryables: dict[str, Any] | None = ...,
    ) -> None: ...

class Term(ops.Term):
    """Term bound to a PyTablesScope (HDFStore `where` expressions)."""

    env = ...
    def __new__(cls, name, env, side=..., encoding=...): ...
    def __init__(self, name, env: PyTablesScope, side=..., encoding=...) -> None: ...
    @property
    def value(self): ...
    @value.setter
    def value(self, new_value) -> None: ...

class Constant(Term):
    """Literal constant in a PyTables expression."""

    def __init__(self, name, env: PyTablesScope, side=..., encoding=...) -> None: ...

class BinOp(ops.BinOp):
    """Binary op that can be translated into a PyTables selection condition."""

    op: str
    queryables: dict[str, Any]
    encoding = ...
    condition = ...
    def __init__(
        self, op: str, lhs, rhs, queryables: dict[str, Any], encoding
    ) -> None: ...
    # Rewrite this node into the given specialized BinOp class when possible.
    def prune(self, klass): ...
    # Coerce the rhs to the lhs column's kind.
    def conform(self, rhs): ...
    @property
    def is_valid(self) -> bool: ...
    # Whether the lhs refers to an indexed/queryable table column.
    @property
    def is_in_table(self) -> bool: ...
    @property
    def kind(self): ...
    @property
    def meta(self): ...
    @property
    def metadata(self): ...
    def generate(self, v) -> str: ...
    def convert_value(self, v) -> TermValue: ...
    def convert_values(self) -> None: ...

class FilterBinOp(BinOp):
    """Comparison evaluated in Python as a post-read row filter."""

    filter: tuple[Any, Any, Index] | None = ...
    def invert(self): ...
    def format(self): ...
    def generate_filter_op(self, invert: bool = ...): ...

class JointFilterBinOp(FilterBinOp):
    def format(self) -> None: ...

class ConditionBinOp(BinOp):
    """Comparison pushed down to PyTables as a selection condition string."""

    def invert(self) -> None: ...
    def format(self): ...
    condition = ...

class JointConditionBinOp(ConditionBinOp):
    condition = ...

class UnaryOp(ops.UnaryOp):
    def prune(self, klass): ...

class PyTablesExprVisitor(BaseExprVisitor):
    """AST visitor specialized for HDFStore `where` expressions."""

    const_type = ...
    term_type = ...
    def __init__(self, env, engine, parser, **kwargs) -> None: ...
    def visit_UnaryOp(self, node, **kwargs): ...
    def visit_Index(self, node, **kwargs): ...
    def visit_Assign(self, node, **kwargs): ...
    def visit_Subscript(self, node, **kwargs): ...
    def visit_Attribute(self, node, **kwargs): ...
    def translate_In(self, op): ...

class PyTablesExpr(expr.Expr):
    """A parsed `where` clause for HDFStore select/remove operations."""

    encoding = ...
    condition = ...
    filter = ...
    terms = ...
    expr = ...
    def __init__(
        self,
        where,
        queryables: dict[str, Any] | None = ...,
        encoding=...,
        scope_level: int = ...,
    ) -> None: ...
    def evaluate(self): ...

class TermValue:
    """A value converted for use in a PyTables condition string."""

    value = ...
    converted = ...
    kind = ...
    def __init__(self, value, converted, kind: str) -> None: ...
    def tostring(self, encoding) -> str: ...

View File

@ -0,0 +1,18 @@
class Scope:
    """Stub for the variable-resolution scope used by pd.eval/query.

    Layers the caller's locals/globals with extra resolvers and a target,
    and holds temporaries created during evaluation.
    """

    level = ...
    scope = ...
    target = ...
    resolvers = ...
    temps = ...
    def __init__(
        self, level, global_dict=..., local_dict=..., resolvers=..., target=...
    ) -> None: ...
    @property
    def has_resolvers(self) -> bool: ...
    # Look up `key`; is_local selects the local (`@`) namespace first.
    def resolve(self, key: str, is_local: bool): ...
    def swapkey(self, old_key: str, new_key: str, new_value=...): ...
    # Register a temporary value and return the generated name for it.
    def add_tmp(self, value) -> str: ...
    @property
    def ntemps(self) -> int: ...
    @property
    def full_scope(self): ...

View File

@ -0,0 +1,51 @@
from typing import Literal
# Stub declarations for the option-docstring constants in
# pandas.core.config_init. Each `*_doc` string documents one option
# registered with pandas' options machinery (pd.set_option).
use_bottleneck_doc: str = ...
use_numexpr_doc: str = ...
pc_precision_doc: str = ...
pc_colspace_doc: str = ...
pc_max_rows_doc: str = ...
pc_min_rows_doc: str = ...
pc_max_cols_doc: str = ...
pc_max_categories_doc: str = ...
pc_max_info_cols_doc: str = ...
pc_nb_repr_h_doc: str = ...
pc_pprint_nest_depth: str = ...
pc_multi_sparse_doc: str = ...
float_format_doc: str = ...
max_colwidth_doc: str = ...
colheader_justify_doc: str = ...
pc_expand_repr_doc: str = ...
pc_show_dimensions_doc: str = ...
pc_east_asian_width_doc: str = ...
pc_ambiguous_as_wide_doc: str = ...
pc_latex_repr_doc: str = ...
pc_table_schema_doc: str = ...
pc_html_border_doc: str = ...
pc_html_use_mathjax_doc: str = ...
pc_width_doc: str = ...
pc_chop_threshold_doc: str = ...
pc_max_seq_items: str = ...
pc_max_info_rows_doc: str = ...
pc_large_repr_doc: str = ...
pc_memory_usage_doc: str = ...
pc_latex_escape: str = ...
pc_latex_longtable: str = ...
pc_latex_multicolumn: str = ...
pc_latex_multicolumn_format: str = ...
pc_latex_multirow: str = ...
max_cols: int = ...
tc_sim_interactive_doc: str = ...
use_inf_as_null_doc: str = ...
use_inf_as_na_doc: str = ...
# Current value of the "mode.chained_assignment" option.
chained_assignment: Literal["warn", "raise"] | None
reader_engine_doc: str = ...
writer_engine_doc: str = ...
parquet_engine_doc: str = ...
plotting_backend_doc: str = ...
register_converter_doc: str = ...

View File

@ -0,0 +1,12 @@
from collections.abc import Sequence
import numpy as np
from pandas.core.arrays.base import ExtensionArray
from pandas.core.dtypes.dtypes import ExtensionDtype
def array(
    data: Sequence[object],
    dtype: str | np.dtype | ExtensionDtype | None = None,
    copy: bool = True,
) -> ExtensionArray: ...
    # Stub for pd.array: construct an ExtensionArray, inferring the best
    # extension dtype when `dtype` is None.

View File

@ -0,0 +1,34 @@
from pandas.core.dtypes.common import (
is_any_real_numeric_dtype as is_any_real_numeric_dtype,
is_bool as is_bool,
is_bool_dtype as is_bool_dtype,
is_complex as is_complex,
is_complex_dtype as is_complex_dtype,
is_datetime64_any_dtype as is_datetime64_any_dtype,
is_datetime64_dtype as is_datetime64_dtype,
is_datetime64_ns_dtype as is_datetime64_ns_dtype,
is_dict_like as is_dict_like,
is_dtype_equal as is_dtype_equal,
is_extension_array_dtype as is_extension_array_dtype,
is_file_like as is_file_like,
is_float as is_float,
is_float_dtype as is_float_dtype,
is_hashable as is_hashable,
is_integer as is_integer,
is_integer_dtype as is_integer_dtype,
is_iterator as is_iterator,
is_list_like as is_list_like,
is_named_tuple as is_named_tuple,
is_number as is_number,
is_numeric_dtype as is_numeric_dtype,
is_object_dtype as is_object_dtype,
is_re as is_re,
is_re_compilable as is_re_compilable,
is_scalar as is_scalar,
is_signed_integer_dtype as is_signed_integer_dtype,
is_string_dtype as is_string_dtype,
is_timedelta64_dtype as is_timedelta64_dtype,
is_timedelta64_ns_dtype as is_timedelta64_ns_dtype,
is_unsigned_integer_dtype as is_unsigned_integer_dtype,
pandas_dtype as pandas_dtype,
)

View File

@ -0,0 +1,35 @@
from typing import (
ClassVar,
Literal,
TypeVar,
)
from pandas.core.arrays import ExtensionArray
from pandas._typing import type_t
class ExtensionDtype:
    """Stub for the abstract base class of all pandas extension dtypes."""

    # The scalar type of the dtype's elements.
    type: ClassVar[type_t]
    name: ClassVar[str]
    @property
    def na_value(self) -> object: ...
    # Single-character dtype kind code, mirroring numpy's dtype.kind.
    @property
    def kind(
        self,
    ) -> Literal["b", "i", "u", "f", "c", "m", "M", "O", "S", "U", "V", "T"]: ...
    @property
    def names(self) -> list[str] | None: ...
    def empty(self, size: int | tuple[int, ...]) -> type_t[ExtensionArray]: ...
    # The ExtensionArray subclass that stores values of this dtype.
    @classmethod
    def construct_array_type(cls) -> type_t[ExtensionArray]: ...
    @classmethod
    def construct_from_string(cls, string: str) -> ExtensionDtype: ...
    @classmethod
    def is_dtype(cls, dtype: object) -> bool: ...

class StorageExtensionDtype(ExtensionDtype): ...

_ExtensionDtypeT = TypeVar("_ExtensionDtypeT", bound=ExtensionDtype)

# Class decorator registering a dtype so construct_from_string can find it.
def register_extension_dtype(cls: type[_ExtensionDtypeT]) -> type[_ExtensionDtypeT]: ...

View File

@ -0,0 +1,50 @@
import pandas as pd
from pandas.api.extensions import ExtensionDtype
from typing_extensions import TypeAlias
from pandas._typing import (
ArrayLike,
Dtype,
DtypeObj,
npt,
)
from pandas.core.dtypes.inference import (
is_bool as is_bool,
is_complex as is_complex,
is_dict_like as is_dict_like,
is_file_like as is_file_like,
is_float as is_float,
is_hashable as is_hashable,
is_integer as is_integer,
is_iterator as is_iterator,
is_list_like as is_list_like,
is_named_tuple as is_named_tuple,
is_number as is_number,
is_re as is_re,
is_re_compilable as is_re_compilable,
is_scalar as is_scalar,
)
# Anything the dtype-predicate functions below accept: an array-like, a
# dtype-like, or a pandas container whose dtype should be inspected.
_ArrayOrDtype: TypeAlias = (
    ArrayLike | npt.DTypeLike | pd.Series | pd.DataFrame | pd.Index | ExtensionDtype
)

# Stubs for the public dtype-introspection predicates in
# pandas.core.dtypes.common; each checks the dtype of its argument.
def is_object_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_datetime64_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_timedelta64_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_dtype_equal(source: Dtype, target: Dtype) -> bool: ...
def is_string_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_integer_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_signed_integer_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_unsigned_integer_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_datetime64_any_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_datetime64_ns_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_timedelta64_ns_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_numeric_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_float_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_bool_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_extension_array_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_complex_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
def is_any_real_numeric_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
# Normalize any dtype-like input into a concrete numpy/pandas dtype object.
def pandas_dtype(dtype: object) -> DtypeObj: ...

View File

@ -0,0 +1,15 @@
from typing import TypeVar
from pandas import (
Categorical,
CategoricalIndex,
Series,
)
# Categorical-bearing input types accepted by union_categoricals.
_CatT = TypeVar("_CatT", bound=Categorical | CategoricalIndex | Series)

# Stub for pd.api.types.union_categoricals: combine categoricals that share
# a dtype, optionally sorting categories or dropping ordering.
def union_categoricals(
    to_union: list[_CatT],
    sort_categories: bool = False,
    ignore_order: bool = False,
) -> Categorical: ...

View File

@ -0,0 +1,63 @@
import datetime as dt
from typing import (
Any,
Literal,
)
import numpy as np
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas._libs import NaTType
from pandas._libs.tslibs import BaseOffset
from pandas._libs.tslibs.offsets import (
RelativeDeltaOffset,
SingleConstructorOffset,
)
from pandas._typing import (
Ordered,
TimeZones,
npt,
)
from pandas.core.dtypes.base import (
ExtensionDtype as ExtensionDtype,
register_extension_dtype as register_extension_dtype,
)
# Base for the masked (nullable) dtypes.
class BaseMaskedDtype(ExtensionDtype): ...

# Base for the dtypes that predate the ExtensionDtype interface.
class PandasExtensionDtype(ExtensionDtype): ...

class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
    """Stub for the dtype of categorical data (categories + orderedness)."""

    def __init__(
        self,
        categories: Series | Index | list[Any] | None = ...,
        ordered: Ordered = ...,
    ) -> None: ...
    @property
    def categories(self) -> Index: ...
    @property
    def ordered(self) -> Ordered: ...

class DatetimeTZDtype(PandasExtensionDtype):
    """Stub for the timezone-aware datetime64[ns, tz] dtype."""

    def __init__(self, unit: Literal["ns"] = ..., tz: TimeZones = ...) -> None: ...
    @property
    def unit(self) -> Literal["ns"]: ...
    @property
    def tz(self) -> dt.tzinfo: ...
    @property
    def na_value(self) -> NaTType: ...

class PeriodDtype(PandasExtensionDtype):
    """Stub for the period[freq] dtype."""

    def __init__(
        self, freq: str | SingleConstructorOffset | RelativeDeltaOffset = ...
    ) -> None: ...
    @property
    def freq(self) -> BaseOffset: ...
    @property
    def na_value(self) -> NaTType: ...

class IntervalDtype(PandasExtensionDtype):
    """Stub for the interval[subtype] dtype."""

    def __init__(self, subtype: str | npt.DTypeLike | None = ...) -> None: ...
    @property
    def subtype(self) -> np.dtype | None: ...

View File

@ -0,0 +1,6 @@
from pandas import Series
from pandas.core.arrays import ExtensionArray
from typing_extensions import TypeAlias
# Typing-only stand-ins for pandas' internal ABC registration classes,
# modeled here simply as the concrete class objects.
ABCSeries: TypeAlias = type[Series]
ABCExtensionArray: TypeAlias = type[ExtensionArray]

View File

@ -0,0 +1,17 @@
from pandas._libs import lib
# Re-exports: these predicates are implemented in the C extension
# pandas._libs.lib and simply aliased here.
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.is_scalar
is_list_like = lib.is_list_like
is_iterator = lib.is_iterator

# Duck-typing predicates on arbitrary objects (not dtype checks).
def is_number(obj: object) -> bool: ...
def is_file_like(obj: object) -> bool: ...
def is_re(obj: object) -> bool: ...
def is_re_compilable(obj: object) -> bool: ...
def is_dict_like(obj: object) -> bool: ...
def is_named_tuple(obj: object) -> bool: ...
def is_hashable(obj: object) -> bool: ...

View File

@ -0,0 +1,59 @@
from typing import (
Any,
overload,
)
import numpy as np
from pandas import (
DataFrame,
Index,
Series,
)
from pandas.core.arrays import ExtensionArray
from typing_extensions import TypeIs
from pandas._libs.missing import NAType
from pandas._libs.tslibs import NaTType
from pandas._typing import (
Scalar,
ScalarT,
ShapeT,
np_1darray,
np_ndarray,
np_ndarray_bool,
)
# Scalar +/-inf checks re-exported from the implementation module.
isposinf_scalar = ...
isneginf_scalar = ...

# isna overloads: the return type mirrors the input's container type
# (DataFrame -> DataFrame of bool, Series -> Series[bool], array-likes ->
# boolean ndarray); on a scalar it acts as a TypeIs narrowing to the
# missing-value types. Overload order matters for type checkers.
@overload
def isna(obj: DataFrame) -> DataFrame: ...
@overload
def isna(obj: Series) -> Series[bool]: ...
@overload
def isna(obj: Index | ExtensionArray | list[ScalarT]) -> np_1darray[np.bool]: ...
@overload
def isna(obj: np_ndarray[ShapeT]) -> np_ndarray[ShapeT, np.bool]: ...
@overload
def isna(obj: list[Any]) -> np_ndarray_bool: ...
@overload
def isna(
    obj: Scalar | NaTType | NAType | None,
) -> TypeIs[NaTType | NAType | None]: ...

# Long-standing alias.
isnull = isna

# notna mirrors isna; on a scalar it narrows to the non-missing type.
@overload
def notna(obj: DataFrame) -> DataFrame: ...
@overload
def notna(obj: Series) -> Series[bool]: ...
@overload
def notna(obj: Index | ExtensionArray | list[ScalarT]) -> np_1darray[np.bool]: ...
@overload
def notna(obj: np_ndarray[ShapeT]) -> np_ndarray[ShapeT, np.bool]: ...
@overload
def notna(obj: list[Any]) -> np_ndarray_bool: ...
@overload
def notna(obj: ScalarT | NaTType | NAType | None) -> TypeIs[ScalarT]: ...

# Long-standing alias.
notnull = notna

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,435 @@
from builtins import (
bool as _bool,
str as _str,
)
from collections.abc import (
Callable,
Hashable,
Iterable,
Mapping,
Sequence,
)
import datetime as dt
import sqlite3
from typing import (
Any,
ClassVar,
Literal,
final,
overload,
)
import numpy as np
from pandas import Index
import pandas.core.indexing as indexing
from pandas.core.resample import DatetimeIndexResampler
from pandas.core.series import (
Series,
)
import sqlalchemy.engine
from typing_extensions import (
Concatenate,
Self,
)
from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
Axis,
CompressionOptions,
CSVQuoting,
DtypeArg,
DtypeBackend,
ExcelWriterMergeCells,
FilePath,
FileWriteMode,
Frequency,
HashableT1,
HashableT2,
HDFCompLib,
IgnoreRaise,
IndexLabel,
Level,
OpenFileErrors,
P,
StorageOptions,
T,
TakeIndexer,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvertibleTypes,
WriteBuffer,
)
from pandas.io.pytables import HDFStore
from pandas.io.sql import SQLTable
class NDFrame(indexing.IndexingMixin):
__hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride]
@final
def set_flags(
self,
*,
copy: _bool = ...,
allows_duplicate_labels: _bool | None = ...,
) -> Self: ...
@property
def attrs(self) -> dict[Hashable | None, Any]: ...
@attrs.setter
def attrs(self, value: Mapping[Hashable | None, Any]) -> None: ...
@property
def shape(self) -> tuple[int, ...]: ...
@property
def ndim(self) -> int: ...
@property
def size(self) -> int: ...
def equals(self, other: Series) -> _bool: ...
@final
def __neg__(self) -> Self: ...
@final
def __pos__(self) -> Self: ...
@final
def __nonzero__(self) -> None: ...
@final
def bool(self) -> _bool: ...
def __abs__(self) -> Self: ...
@final
def __round__(self, decimals: int = ...) -> Self: ...
@final
def __contains__(self, key) -> _bool: ...
@property
def empty(self) -> _bool: ...
__array_priority__: int = ...
def __array__(self, dtype=...) -> np.ndarray: ...
@final
def to_excel(
self,
excel_writer,
sheet_name: _str = "Sheet1",
na_rep: _str = "",
float_format: _str | None = ...,
columns: _str | Sequence[_str] | None = ...,
header: _bool | list[_str] = True,
index: _bool = True,
index_label: _str | Sequence[_str] | None = ...,
startrow: int = 0,
startcol: int = 0,
engine: _str | None = ...,
merge_cells: ExcelWriterMergeCells = True,
inf_rep: _str = "inf",
freeze_panes: tuple[int, int] | None = ...,
) -> None: ...
@final
def to_hdf(
self,
path_or_buf: FilePath | HDFStore,
*,
key: _str,
mode: Literal["a", "w", "r+"] = ...,
complevel: int | None = ...,
complib: HDFCompLib | None = ...,
append: _bool = ...,
format: Literal["t", "table", "f", "fixed"] | None = ...,
index: _bool = ...,
min_itemsize: int | dict[HashableT1, int] | None = ...,
nan_rep: _str | None = ...,
dropna: _bool | None = ...,
data_columns: Literal[True] | list[HashableT2] | None = ...,
errors: OpenFileErrors = ...,
encoding: _str = ...,
) -> None: ...
@overload
def to_markdown(
self,
buf: FilePath | WriteBuffer[str],
*,
mode: FileWriteMode = ...,
index: _bool = ...,
storage_options: StorageOptions = ...,
**kwargs: Any,
) -> None: ...
@overload
def to_markdown(
self,
buf: None = ...,
*,
mode: FileWriteMode | None = ...,
index: _bool = ...,
storage_options: StorageOptions = ...,
**kwargs: Any,
) -> _str: ...
@final
def to_sql(
self,
name: _str,
con: str | sqlalchemy.engine.Connectable | sqlite3.Connection,
schema: _str | None = ...,
if_exists: Literal["fail", "replace", "append"] = "fail",
index: _bool = True,
index_label: IndexLabel = None,
chunksize: int | None = ...,
dtype: DtypeArg | None = ...,
method: (
Literal["multi"]
| Callable[
[SQLTable, Any, list[str], Iterable[tuple[Any, ...]]],
int | None,
]
| None
) = ...,
) -> int | None: ...
@final
def to_pickle(
self,
path: FilePath | WriteBuffer[bytes],
compression: CompressionOptions = "infer",
protocol: int = 5,
storage_options: StorageOptions = ...,
) -> None: ...
@final
def to_clipboard(
self,
excel: _bool = True,
sep: _str | None = None,
*,
na_rep: _str = ...,
float_format: _str | Callable[[object], _str] | None = ...,
columns: list[HashableT1] | None = ...,
header: _bool | list[_str] = ...,
index: _bool = ...,
index_label: Literal[False] | _str | list[HashableT2] | None = ...,
mode: FileWriteMode = ...,
encoding: _str | None = ...,
compression: CompressionOptions = ...,
quoting: CSVQuoting = ...,
quotechar: _str = ...,
lineterminator: _str | None = ...,
chunksize: int | None = ...,
date_format: _str | None = ...,
doublequote: _bool = ...,
escapechar: _str | None = ...,
decimal: _str = ...,
errors: _str = ...,
storage_options: StorageOptions = ...,
) -> None: ...
# Stub: render the object as a LaTeX table.
# Overload 1: writing to a path/buffer returns None.
@overload
def to_latex(
    self,
    buf: FilePath | WriteBuffer[str],
    columns: list[_str] | None = ...,
    header: _bool | list[_str] = ...,
    index: _bool = ...,
    na_rep: _str = ...,
    formatters=...,
    float_format=...,
    sparsify: _bool | None = ...,
    index_names: _bool = ...,
    bold_rows: _bool = ...,
    column_format: _str | None = ...,
    longtable: _bool | None = ...,
    escape: _bool | None = ...,
    encoding: _str | None = ...,
    decimal: _str = ...,
    multicolumn: _bool | None = ...,
    multicolumn_format: _str | None = ...,
    multirow: _bool | None = ...,
    caption: _str | tuple[_str, _str] | None = ...,
    label: _str | None = ...,
    position: _str | None = ...,
) -> None: ...
# Overload 2: buf=None returns the rendered LaTeX as a string.
@overload
def to_latex(
    self,
    buf: None = ...,
    columns: list[_str] | None = ...,
    header: _bool | list[_str] = ...,
    index: _bool = ...,
    na_rep: _str = ...,
    formatters=...,
    float_format=...,
    sparsify: _bool | None = ...,
    index_names: _bool = ...,
    bold_rows: _bool = ...,
    column_format: _str | None = ...,
    longtable: _bool | None = ...,
    escape: _bool | None = ...,
    encoding: _str | None = ...,
    decimal: _str = ...,
    multicolumn: _bool | None = ...,
    multicolumn_format: _str | None = ...,
    multirow: _bool | None = ...,
    caption: _str | tuple[_str, _str] | None = ...,
    label: _str | None = ...,
    position: _str | None = ...,
) -> _str: ...
# Stub: write the object as CSV.
# Overload 1: writing to a path/buffer returns None.
@overload
def to_csv(
    self,
    path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str],
    sep: _str = ...,
    na_rep: _str = ...,
    float_format: _str | Callable[[object], _str] | None = ...,
    columns: list[HashableT1] | None = ...,
    header: _bool | list[_str] = ...,
    index: _bool = ...,
    index_label: Literal[False] | _str | list[HashableT2] | None = ...,
    mode: FileWriteMode = ...,
    encoding: _str | None = ...,
    compression: CompressionOptions = ...,
    quoting: CSVQuoting = ...,
    quotechar: _str = ...,
    lineterminator: _str | None = ...,
    chunksize: int | None = ...,
    date_format: _str | None = ...,
    doublequote: _bool = ...,
    escapechar: _str | None = ...,
    decimal: _str = ...,
    errors: OpenFileErrors = ...,
    storage_options: StorageOptions = ...,
) -> None: ...
# Overload 2: path_or_buf=None returns the CSV text as a string.
@overload
def to_csv(
    self,
    path_or_buf: None = ...,
    sep: _str = ...,
    na_rep: _str = ...,
    float_format: _str | Callable[[object], _str] | None = ...,
    columns: list[HashableT1] | None = ...,
    header: _bool | list[_str] = ...,
    index: _bool = ...,
    index_label: Literal[False] | _str | list[HashableT2] | None = ...,
    mode: FileWriteMode = ...,
    encoding: _str | None = ...,
    compression: CompressionOptions = ...,
    quoting: CSVQuoting = ...,
    quotechar: _str = ...,
    lineterminator: _str | None = ...,
    chunksize: int | None = ...,
    date_format: _str | None = ...,
    doublequote: _bool = ...,
    escapechar: _str | None = ...,
    decimal: _str = ...,
    errors: OpenFileErrors = ...,
    storage_options: StorageOptions = ...,
) -> _str: ...
# Stub: `del obj[idx]` — remove the item/column keyed by `idx` in place.
@final
def __delitem__(self, idx: Hashable) -> None: ...
# Stub: drop labels from rows or columns.
# Six overloads form a matrix: {columns given, index given, labels given}
# x {inplace=True -> None, inplace=False -> Self}. `labels` is mutually
# exclusive with `index`/`columns` (when labels is given both are None).
@overload
def drop(
    self,
    labels: None = ...,
    *,
    axis: Axis = ...,
    index: Hashable | Sequence[Hashable] | Index = ...,
    columns: Hashable | Iterable[Hashable],
    level: Level | None = ...,
    inplace: Literal[True],
    errors: IgnoreRaise = ...,
) -> None: ...
@overload
def drop(
    self,
    labels: None = ...,
    *,
    axis: Axis = ...,
    index: Hashable | Sequence[Hashable] | Index,
    columns: Hashable | Iterable[Hashable] = ...,
    level: Level | None = ...,
    inplace: Literal[True],
    errors: IgnoreRaise = ...,
) -> None: ...
@overload
def drop(
    self,
    labels: Hashable | Sequence[Hashable] | Index,
    *,
    axis: Axis = ...,
    index: None = ...,
    columns: None = ...,
    level: Level | None = ...,
    inplace: Literal[True],
    errors: IgnoreRaise = ...,
) -> None: ...
@overload
def drop(
    self,
    labels: None = ...,
    *,
    axis: Axis = ...,
    index: Hashable | Sequence[Hashable] | Index = ...,
    columns: Hashable | Iterable[Hashable],
    level: Level | None = ...,
    inplace: Literal[False] = ...,
    errors: IgnoreRaise = ...,
) -> Self: ...
@overload
def drop(
    self,
    labels: None = ...,
    *,
    axis: Axis = ...,
    index: Hashable | Sequence[Hashable] | Index,
    columns: Hashable | Iterable[Hashable] = ...,
    level: Level | None = ...,
    inplace: Literal[False] = ...,
    errors: IgnoreRaise = ...,
) -> Self: ...
@overload
def drop(
    self,
    labels: Hashable | Sequence[Hashable] | Index,
    *,
    axis: Axis = ...,
    index: None = ...,
    columns: None = ...,
    level: Level | None = ...,
    inplace: Literal[False] = ...,
    errors: IgnoreRaise = ...,
) -> Self: ...
# Stub: apply `func(self, *args, **kwargs)` and return its result.
# Overload 1 types the plain-callable form via ParamSpec P; overload 2 is the
# `(callable, "keyword")` tuple form where self is passed as that keyword.
@overload
def pipe(
    self,
    func: Callable[Concatenate[Self, P], T],
    *args: P.args,
    **kwargs: P.kwargs,
) -> T: ...
@overload
def pipe(
    self,
    func: tuple[Callable[..., T], str],
    *args: Any,
    **kwargs: Any,
) -> T: ...
# Stubs for final dunder helpers: metadata propagation (__finalize__),
# attribute assignment, and the copy protocol (__copy__/__deepcopy__
# both return Self).
@final
def __finalize__(self, other, method=..., **kwargs) -> Self: ...
@final
def __setattr__(self, name: _str, value) -> None: ...
@final
def __copy__(self, deep: _bool = ...) -> Self: ...
@final
def __deepcopy__(self, memo=...) -> Self: ...
# Stub: convert columns to the best nullable/extension dtypes.
# All conversion toggles default to True; backend defaults to "numpy_nullable".
@final
def convert_dtypes(
    self,
    infer_objects: _bool = True,
    convert_string: _bool = True,
    convert_integer: _bool = True,
    convert_boolean: _bool = True,
    convert_floating: _bool = True,
    dtype_backend: DtypeBackend = "numpy_nullable",
) -> Self: ...
# Stub: resample a time series.
# Return is typed as DatetimeIndexResampler[Self] here (the non-grouped
# resampler), parameterized on the concrete subclass.
@final
def resample(
    self,
    rule: Frequency | dt.timedelta,
    axis: Axis | _NoDefaultDoNotUse = 0,
    closed: Literal["right", "left"] | None = None,
    label: Literal["right", "left"] | None = None,
    on: Level | None = None,
    level: Level | None = None,
    origin: TimeGrouperOrigin | TimestampConvertibleTypes = "start_day",
    offset: TimedeltaConvertibleTypes | None = None,
    group_keys: _bool = False,
) -> DatetimeIndexResampler[Self]: ...
# Stub: return the elements at the given positional indices along `axis`.
@final
def take(self, indices: TakeIndexer, axis: Axis = 0, **kwargs: Any) -> Self: ...

View File

@ -0,0 +1,15 @@
from pandas.core.groupby.generic import (
DataFrameGroupBy as DataFrameGroupBy,
NamedAgg as NamedAgg,
SeriesGroupBy as SeriesGroupBy,
)
from pandas.core.groupby.groupby import GroupBy as GroupBy
from pandas.core.groupby.grouper import Grouper as Grouper
# Public API re-exported from pandas.core.groupby.
__all__ = [
    "DataFrameGroupBy",
    "NamedAgg",
    "SeriesGroupBy",
    "GroupBy",
    "Grouper",
]

View File

@ -0,0 +1,56 @@
from collections.abc import Hashable
import dataclasses
from typing import (
Literal,
TypeAlias,
)
# Immutable, orderable key identifying one output column of a groupby op.
@dataclasses.dataclass(order=True, frozen=True)
class OutputKey:
    label: Hashable  # output label
    position: int  # ordinal position of the output

# Names of groupby kernels that reduce each group to a single value.
ReductionKernelType: TypeAlias = Literal[
    "all",
    "any",
    "corrwith",
    "count",
    "first",
    "idxmax",
    "idxmin",
    "last",
    "max",
    "mean",
    "median",
    "min",
    "nunique",
    "prod",
    # as long as `quantile`'s signature accepts only
    # a single quantile value, it's a reduction.
    # GH#27526 might change that.
    "quantile",
    "sem",
    "size",
    "skew",
    "std",
    "sum",
    "var",
]
# Names of groupby kernels that return an object shaped like the input group.
TransformationKernelType: TypeAlias = Literal[
    "bfill",
    "cumcount",
    "cummax",
    "cummin",
    "cumprod",
    "cumsum",
    "diff",
    "ffill",
    "fillna",
    "ngroup",
    "pct_change",
    "rank",
    "shift",
]
# Union of both kernel-name sets, accepted where either kind is allowed.
TransformReductionListType: TypeAlias = ReductionKernelType | TransformationKernelType

View File

@ -0,0 +1,466 @@
from collections.abc import (
Callable,
Hashable,
Iterable,
Iterator,
Sequence,
)
from typing import (
Any,
Concatenate,
Generic,
Literal,
NamedTuple,
Protocol,
TypeVar,
final,
overload,
)
from matplotlib.axes import Axes as PlotAxes
import numpy as np
from pandas.core.frame import DataFrame
from pandas.core.groupby.base import TransformReductionListType
from pandas.core.groupby.groupby import (
GroupBy,
GroupByPlot,
)
from pandas.core.series import Series
from typing_extensions import (
Self,
TypeAlias,
)
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._typing import (
S2,
S3,
AggFuncTypeBase,
AggFuncTypeFrame,
ByT,
CorrelationMethod,
Dtype,
IndexLabel,
Level,
ListLike,
NsmallestNlargestKeep,
P,
Scalar,
TakeIndexer,
WindowingEngine,
WindowingEngineKwargs,
)
# An aggregation spec: either a kernel name or an arbitrary callable.
AggScalar: TypeAlias = str | Callable[..., Any]

# (column, aggfunc) pair used for named aggregation in DataFrameGroupBy.agg.
class NamedAgg(NamedTuple):
    column: str
    aggfunc: AggScalar
# Stub for the result of Series.groupby: S2 is the Series element type,
# ByT the type of the group keys.
class SeriesGroupBy(GroupBy[Series[S2]], Generic[S2, ByT]):
    # aggregate overloads: typed callables preserve the element type S3;
    # a list of aggregations yields a DataFrame; a generic/None func a Series.
    @overload
    def aggregate( # pyrefly: ignore
        self,
        func: Callable[Concatenate[Series[S2], P], S3],
        /,
        *args,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs,
    ) -> Series[S3]: ...
    @overload
    def aggregate(
        self,
        func: Callable[[Series], S3],
        *args,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs,
    ) -> Series[S3]: ...
    @overload
    def aggregate(
        self,
        func: list[AggFuncTypeBase],
        /,
        *args,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs,
    ) -> DataFrame: ...
    @overload
    def aggregate(
        self,
        func: AggFuncTypeBase | None = ...,
        /,
        *args,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs,
    ) -> Series: ...
    agg = aggregate  # alias, same overloads as aggregate
    @overload
    def transform(
        self,
        func: Callable[Concatenate[Series[S2], P], Series[S3]],
        /,
        *args: Any,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs: Any,
    ) -> Series[S3]: ...
    @overload
    def transform(
        self,
        func: Callable,
        *args: Any,
        **kwargs: Any,
    ) -> Series: ...
    @overload
    def transform(
        self, func: TransformReductionListType, *args, **kwargs
    ) -> Series: ...
    def filter(
        self, func: Callable | str, dropna: bool = ..., *args, **kwargs
    ) -> Series: ...
    def nunique(self, dropna: bool = ...) -> Series[int]: ...
    # describe delegates to super() method but here it has keyword-only parameters
    def describe( # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
        self,
        *,
        percentiles: Iterable[float] | None = ...,
        include: Literal["all"] | list[Dtype] | None = ...,
        exclude: list[Dtype] | None = ...,
    ) -> DataFrame: ...
    # value_counts: counts (Series[int]) unless normalize=True (Series[float]).
    @overload
    def value_counts(
        self,
        normalize: Literal[False] = ...,
        sort: bool = ...,
        ascending: bool = ...,
        bins: int | Sequence[int] | None = ...,
        dropna: bool = ...,
    ) -> Series[int]: ...
    @overload
    def value_counts(
        self,
        normalize: Literal[True],
        sort: bool = ...,
        ascending: bool = ...,
        bins: int | Sequence[int] | None = ...,
        dropna: bool = ...,
    ) -> Series[float]: ...
    def take(
        self,
        indices: TakeIndexer,
        **kwargs,
    ) -> Series[S2]: ...
    def skew(
        self,
        skipna: bool = True,
        numeric_only: bool = False,
        **kwargs,
    ) -> Series: ...
    @property
    def plot(self) -> GroupByPlot[Self]: ...
    def nlargest(
        self, n: int = 5, keep: NsmallestNlargestKeep = "first"
    ) -> Series[S2]: ...
    def nsmallest(
        self, n: int = 5, keep: NsmallestNlargestKeep = "first"
    ) -> Series[S2]: ...
    def idxmin(self, skipna: bool = True) -> Series: ...
    def idxmax(self, skipna: bool = True) -> Series: ...
    def corr(
        self,
        other: Series,
        method: CorrelationMethod = ...,
        min_periods: int | None = ...,
    ) -> Series: ...
    def cov(
        self,
        other: Series,
        min_periods: int | None = None,
        ddof: int | None = 1,
    ) -> Series: ...
    @property
    def is_monotonic_increasing(self) -> Series[bool]: ...
    @property
    def is_monotonic_decreasing(self) -> Series[bool]: ...
    def hist(
        self,
        by: IndexLabel | None = None,
        ax: PlotAxes | None = None,
        grid: bool = True,
        xlabelsize: float | str | None = None,
        xrot: float | None = None,
        ylabelsize: float | str | None = None,
        yrot: float | None = None,
        figsize: tuple[float, float] | None = None,
        bins: int | Sequence[int] = 10,
        backend: str | None = None,
        legend: bool = False,
        **kwargs,
    ) -> Series: ...  # Series[Axes] but this is not allowed
    @property
    def dtype(self) -> Series: ...
    def unique(self) -> Series: ...
    # Overrides that provide more precise return types over the GroupBy class
    @final  # type: ignore[misc]
    def __iter__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self,
    ) -> Iterator[tuple[ByT, Series[S2]]]: ...
# _TT tracks the as_index flag (Literal True/False) on DataFrameGroupBy.
_TT = TypeVar("_TT", bound=Literal[True, False])

# Callback protocols used to type DataFrameGroupBy.apply by return kind:
# scalar/list/dict -> Series result; DataFrame/Series -> DataFrame result;
# float over an Iterable -> DataFrame result.
# ty ignore needed because of https://github.com/astral-sh/ty/issues/157#issuecomment-3017337945
class DFCallable1(Protocol[P]):  # ty: ignore[invalid-argument-type]
    def __call__(
        self, df: DataFrame, /, *args: P.args, **kwargs: P.kwargs
    ) -> Scalar | list | dict: ...

class DFCallable2(Protocol[P]):  # ty: ignore[invalid-argument-type]
    def __call__(
        self, df: DataFrame, /, *args: P.args, **kwargs: P.kwargs
    ) -> DataFrame | Series: ...

class DFCallable3(Protocol[P]):  # ty: ignore[invalid-argument-type]
    def __call__(self, df: Iterable, /, *args: P.args, **kwargs: P.kwargs) -> float: ...
# Stub for the result of DataFrame.groupby: ByT is the group-key type,
# _TT the Literal as_index flag (drives Series-vs-DataFrame returns below).
class DataFrameGroupBy(GroupBy[DataFrame], Generic[ByT, _TT]):
    # apply overloads dispatch on the callback's return kind (see DFCallable*).
    # error: Overload 3 for "apply" will never be used because its parameters overlap overload 1
    @overload  # type: ignore[override]
    def apply(
        self,
        func: DFCallable1[P],
        /,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> Series: ...
    @overload
    def apply(
        self,
        func: DFCallable2[P],
        /,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> DataFrame: ...
    @overload
    def apply(
        self,
        func: DFCallable3[P],
        /,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> DataFrame: ...
    # error: overload 1 overlaps overload 2 because of different return types
    @overload
    def aggregate(self, func: Literal["size"]) -> Series: ...  # type: ignore[overload-overlap]
    @overload
    def aggregate(
        self,
        func: AggFuncTypeFrame | None = ...,
        *args,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs,
    ) -> DataFrame: ...
    @overload
    def aggregate(
        self,
        func: AggFuncTypeFrame | None = None,
        /,
        **kwargs,
    ) -> DataFrame: ...
    agg = aggregate  # alias, same overloads as aggregate
    @overload
    def transform(
        self,
        func: Callable[Concatenate[DataFrame, P], DataFrame],
        *args: Any,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        **kwargs: Any,
    ) -> DataFrame: ...
    @overload
    def transform(
        self,
        func: Callable,
        *args: Any,
        **kwargs: Any,
    ) -> DataFrame: ...
    @overload
    def transform(
        self, func: TransformReductionListType, *args, **kwargs
    ) -> DataFrame: ...
    def filter(
        self, func: Callable, dropna: bool = ..., *args, **kwargs
    ) -> DataFrame: ...
    # Selecting a single column yields a SeriesGroupBy; a list of columns
    # yields another DataFrameGroupBy with the same as_index flag.
    @overload
    def __getitem__(self, key: Scalar) -> SeriesGroupBy[Any, ByT]: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __getitem__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, key: Iterable[Hashable]
    ) -> DataFrameGroupBy[ByT, _TT]: ...
    def nunique(self, dropna: bool = True) -> DataFrame: ...
    def idxmax(
        self,
        skipna: bool = True,
        numeric_only: bool = False,
    ) -> DataFrame: ...
    def idxmin(
        self,
        skipna: bool = True,
        numeric_only: bool = False,
    ) -> DataFrame: ...
    # boxplot: subplots=True -> one Axes per group (Series of Axes),
    # subplots=False -> a single Axes; bool -> the union.
    @overload
    def boxplot(
        self,
        subplots: Literal[True] = ...,
        column: IndexLabel | None = ...,
        fontsize: float | str | None = ...,
        rot: float = ...,
        grid: bool = ...,
        ax: PlotAxes | None = ...,
        figsize: tuple[float, float] | None = ...,
        layout: tuple[int, int] | None = ...,
        sharex: bool = ...,
        sharey: bool = ...,
        backend: str | None = ...,
        **kwargs,
    ) -> Series: ...  # Series[PlotAxes] but this is not allowed
    @overload
    def boxplot(
        self,
        subplots: Literal[False],
        column: IndexLabel | None = ...,
        fontsize: float | str | None = ...,
        rot: float = ...,
        grid: bool = ...,
        ax: PlotAxes | None = ...,
        figsize: tuple[float, float] | None = ...,
        layout: tuple[int, int] | None = ...,
        sharex: bool = ...,
        sharey: bool = ...,
        backend: str | None = ...,
        **kwargs,
    ) -> PlotAxes: ...
    @overload
    def boxplot(
        self,
        subplots: bool,
        column: IndexLabel | None = ...,
        fontsize: float | str | None = ...,
        rot: float = ...,
        grid: bool = ...,
        ax: PlotAxes | None = ...,
        figsize: tuple[float, float] | None = ...,
        layout: tuple[int, int] | None = ...,
        sharex: bool = ...,
        sharey: bool = ...,
        backend: str | None = ...,
        **kwargs,
    ) -> PlotAxes | Series: ...  # Series[PlotAxes]
    # value_counts: as_index=True gives a Series (int counts or float
    # proportions); as_index=False gives a DataFrame.
    @overload
    def value_counts(
        self: DataFrameGroupBy[ByT, Literal[True]],
        subset: ListLike | None = ...,
        normalize: Literal[False] = ...,
        sort: bool = ...,
        ascending: bool = ...,
        dropna: bool = ...,
    ) -> Series[int]: ...
    @overload
    def value_counts(
        self: DataFrameGroupBy[ByT, Literal[True]],
        subset: ListLike | None,
        normalize: Literal[True],
        sort: bool = ...,
        ascending: bool = ...,
        dropna: bool = ...,
    ) -> Series[float]: ...
    @overload
    def value_counts(
        self: DataFrameGroupBy[ByT, Literal[False]],
        subset: ListLike | None = ...,
        normalize: Literal[False] = ...,
        sort: bool = ...,
        ascending: bool = ...,
        dropna: bool = ...,
    ) -> DataFrame: ...
    @overload
    def value_counts(
        self: DataFrameGroupBy[ByT, Literal[False]],
        subset: ListLike | None,
        normalize: Literal[True],
        sort: bool = ...,
        ascending: bool = ...,
        dropna: bool = ...,
    ) -> DataFrame: ...
    def take(self, indices: TakeIndexer, **kwargs) -> DataFrame: ...
    @overload
    def skew(
        self,
        skipna: bool = ...,
        numeric_only: bool = ...,
        *,
        level: Level,
        **kwargs,
    ) -> DataFrame: ...
    @overload
    def skew(
        self,
        skipna: bool = ...,
        numeric_only: bool = ...,
        *,
        level: None = ...,
        **kwargs,
    ) -> Series: ...
    @property
    def plot(self) -> GroupByPlot[Self]: ...
    def corr(
        self,
        method: str | Callable[[np.ndarray, np.ndarray], float] = ...,
        min_periods: int = ...,
        numeric_only: bool = False,
    ) -> DataFrame: ...
    def cov(
        self,
        min_periods: int | None = ...,
        ddof: int | None = 1,
        numeric_only: bool = False,
    ) -> DataFrame: ...
    def hist(
        self,
        column: IndexLabel | None = None,
        by: IndexLabel | None = None,
        grid: bool = True,
        xlabelsize: float | str | None = None,
        xrot: float | None = None,
        ylabelsize: float | str | None = None,
        yrot: float | None = None,
        ax: PlotAxes | None = None,
        sharex: bool = False,
        sharey: bool = False,
        figsize: tuple[float, float] | None = None,
        layout: tuple[int, int] | None = None,
        bins: int | Sequence[int] = 10,
        backend: str | None = None,
        legend: bool = False,
        **kwargs,
    ) -> Series: ...  # Series[Axes] but this is not allowed
    @property
    def dtypes(self) -> Series: ...
    # Attribute access falls back to column selection (SeriesGroupBy).
    def __getattr__(self, name: str) -> SeriesGroupBy[Any, ByT]: ...
    # Overrides that provide more precise return types over the GroupBy class
    @final  # type: ignore[misc]
    def __iter__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self,
    ) -> Iterator[tuple[ByT, DataFrame]]: ...
    # size: Series[int] when as_index=True, DataFrame when as_index=False.
    @overload
    def size(self: DataFrameGroupBy[ByT, Literal[True]]) -> Series[int]: ...
    @overload
    def size(self: DataFrameGroupBy[ByT, Literal[False]]) -> DataFrame: ...
    @overload
    def size(self: DataFrameGroupBy[Timestamp, Literal[True]]) -> Series[int]: ...
    @overload
    def size(self: DataFrameGroupBy[Timestamp, Literal[False]]) -> DataFrame: ...

View File

@ -0,0 +1,393 @@
from collections.abc import (
Callable,
Hashable,
Iterable,
Iterator,
Sequence,
)
import datetime as dt
from typing import (
Any,
Generic,
Literal,
TypeVar,
final,
overload,
)
import numpy as np
from pandas.core.base import SelectionMixin
from pandas.core.frame import DataFrame
from pandas.core.groupby import (
generic,
)
from pandas.core.groupby.indexing import (
GroupByIndexingMixin,
GroupByNthSelector,
)
from pandas.core.indexers import BaseIndexer
from pandas.core.indexes.api import Index
from pandas.core.resample import (
DatetimeIndexResamplerGroupby,
PeriodIndexResamplerGroupby,
TimedeltaIndexResamplerGroupby,
)
from pandas.core.series import Series
from pandas.core.window import (
ExpandingGroupby,
ExponentialMovingWindowGroupby,
RollingGroupby,
)
from typing_extensions import (
Concatenate,
Self,
TypeAlias,
)
from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._libs.tslibs import BaseOffset
from pandas._typing import (
S1,
AnyArrayLike,
Axis,
AxisInt,
CalculationMethod,
Dtype,
Frequency,
IndexLabel,
IntervalClosedType,
MaskType,
NDFrameT,
P,
RandomState,
Scalar,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimestampConvention,
TimestampConvertibleTypes,
WindowingEngine,
WindowingEngineKwargs,
npt,
)
from pandas.plotting import PlotAccessor
# Union of the grouped resampler flavors returned by GroupBy.resample,
# parameterized on the grouped frame/series type.
_ResamplerGroupBy: TypeAlias = (
    DatetimeIndexResamplerGroupby[NDFrameT]  # ty: ignore[invalid-argument-type]
    | PeriodIndexResamplerGroupby[NDFrameT]  # ty: ignore[invalid-argument-type]
    | TimedeltaIndexResamplerGroupby[NDFrameT]  # ty: ignore[invalid-argument-type]
)
# Shared stub for Series/DataFrame groupby objects; NDFrameT is the grouped
# object type. Series/DataFrame-specific returns are expressed via
# self-type overloads (GroupBy[Series] vs GroupBy[DataFrame]).
class GroupBy(BaseGroupBy[NDFrameT]):
    def __getattr__(self, attr: str) -> Any: ...
    def apply(self, func: Callable | str, *args, **kwargs) -> NDFrameT: ...
    @final
    @overload
    def any(self: GroupBy[Series], skipna: bool = ...) -> Series[bool]: ...
    @overload
    def any(self: GroupBy[DataFrame], skipna: bool = ...) -> DataFrame: ...
    @final
    @overload
    def all(self: GroupBy[Series], skipna: bool = ...) -> Series[bool]: ...
    @overload
    def all(self: GroupBy[DataFrame], skipna: bool = ...) -> DataFrame: ...
    @final
    @overload
    def count(self: GroupBy[Series]) -> Series[int]: ...
    @overload
    def count(self: GroupBy[DataFrame]) -> DataFrame: ...
    @final
    def mean(
        self,
        numeric_only: bool = False,
        engine: WindowingEngine = None,
        engine_kwargs: WindowingEngineKwargs = None,
    ) -> NDFrameT: ...
    @final
    def median(self, numeric_only: bool = False) -> NDFrameT: ...
    @final
    @overload
    def std(
        self: GroupBy[Series],
        ddof: int = ...,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        numeric_only: bool = ...,
    ) -> Series[float]: ...
    @overload
    def std(
        self: GroupBy[DataFrame],
        ddof: int = ...,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        numeric_only: bool = ...,
    ) -> DataFrame: ...
    @final
    @overload
    def var(
        self: GroupBy[Series],
        ddof: int = ...,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        numeric_only: bool = ...,
    ) -> Series[float]: ...
    @overload
    def var(
        self: GroupBy[DataFrame],
        ddof: int = ...,
        engine: WindowingEngine = ...,
        engine_kwargs: WindowingEngineKwargs = ...,
        numeric_only: bool = ...,
    ) -> DataFrame: ...
    @final
    @overload
    def sem(
        self: GroupBy[Series], ddof: int = ..., numeric_only: bool = ...
    ) -> Series[float]: ...
    @overload
    def sem(
        self: GroupBy[DataFrame], ddof: int = ..., numeric_only: bool = ...
    ) -> DataFrame: ...
    # NOTE(review): only the GroupBy[Series] case is declared here; the
    # DataFrame case is overloaded on DataFrameGroupBy.size instead.
    def size(self: GroupBy[Series]) -> Series[int]: ...
    @final
    def sum(
        self,
        numeric_only: bool = False,
        min_count: int = 0,
        engine: WindowingEngine = None,
        engine_kwargs: WindowingEngineKwargs = None,
    ) -> NDFrameT: ...
    @final
    def prod(self, numeric_only: bool = False, min_count: int = 0) -> NDFrameT: ...
    @final
    def min(
        self,
        numeric_only: bool = False,
        min_count: int = -1,
        engine: WindowingEngine = None,
        engine_kwargs: WindowingEngineKwargs = None,
    ) -> NDFrameT: ...
    @final
    def max(
        self,
        numeric_only: bool = False,
        min_count: int = -1,
        engine: WindowingEngine = None,
        engine_kwargs: WindowingEngineKwargs = None,
    ) -> NDFrameT: ...
    @final
    def first(
        self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True
    ) -> NDFrameT: ...
    @final
    def last(
        self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True
    ) -> NDFrameT: ...
    @final
    def ohlc(self) -> DataFrame: ...
    def describe(
        self,
        percentiles: Iterable[float] | None = ...,
        include: Literal["all"] | list[Dtype] | None = ...,
        exclude: list[Dtype] | None = ...,
    ) -> DataFrame: ...
    @final
    def resample(
        self,
        rule: Frequency | dt.timedelta,
        how: str | None = ...,
        fill_method: str | None = ...,
        limit: int | None = ...,
        kind: str | None = ...,
        on: Hashable | None = ...,
        *,
        closed: Literal["left", "right"] | None = ...,
        label: Literal["left", "right"] | None = ...,
        axis: Axis = ...,
        convention: TimestampConvention | None = ...,
        origin: TimeGrouperOrigin | TimestampConvertibleTypes = ...,
        offset: TimedeltaConvertibleTypes | None = ...,
        group_keys: bool = ...,
        **kwargs,
    ) -> _ResamplerGroupBy[NDFrameT]: ...
    @final
    def rolling(
        self,
        window: int | dt.timedelta | str | BaseOffset | BaseIndexer | None = ...,
        min_periods: int | None = None,
        center: bool | None = False,
        win_type: str | None = None,
        axis: Axis = 0,
        on: str | Index | None = None,
        closed: IntervalClosedType | None = None,
        method: CalculationMethod = "single",
        *,
        selection: IndexLabel | None = None,
    ) -> RollingGroupby[NDFrameT]: ...
    @final
    def expanding(
        self,
        min_periods: int = ...,
        axis: Axis = ...,
        method: CalculationMethod = ...,
        selection: IndexLabel | None = ...,
    ) -> ExpandingGroupby[NDFrameT]: ...
    @final
    def ewm(
        self,
        com: float | None = ...,
        span: float | None = ...,
        halflife: TimedeltaConvertibleTypes | None = ...,
        alpha: float | None = ...,
        min_periods: int | None = ...,
        adjust: bool = ...,
        ignore_na: bool = ...,
        axis: Axis = ...,
        times: str | np.ndarray | Series | np.timedelta64 | None = ...,
        method: CalculationMethod = ...,
        *,
        selection: IndexLabel | None = ...,
    ) -> ExponentialMovingWindowGroupby[NDFrameT]: ...
    @final
    def ffill(self, limit: int | None = ...) -> NDFrameT: ...
    @final
    def bfill(self, limit: int | None = ...) -> NDFrameT: ...
    @final
    @property
    def nth(self) -> GroupByNthSelector[Self]: ...
    @final
    def quantile(
        self,
        q: float | AnyArrayLike = 0.5,
        interpolation: str = "linear",
        numeric_only: bool = False,
    ) -> NDFrameT: ...
    @final
    def ngroup(self, ascending: bool = True) -> Series[int]: ...
    @final
    def cumcount(self, ascending: bool = True) -> Series[int]: ...
    @final
    def rank(
        self,
        method: str = "average",
        ascending: bool = True,
        na_option: str = "keep",
        pct: bool = False,
        axis: AxisInt | _NoDefaultDoNotUse = 0,
    ) -> NDFrameT: ...
    @final
    def cumprod(
        self, axis: Axis | _NoDefaultDoNotUse = ..., *args, **kwargs
    ) -> NDFrameT: ...
    @final
    def cumsum(
        self, axis: Axis | _NoDefaultDoNotUse = ..., *args, **kwargs
    ) -> NDFrameT: ...
    @final
    def cummin(
        self,
        axis: AxisInt | _NoDefaultDoNotUse = ...,
        numeric_only: bool = ...,
        **kwargs,
    ) -> NDFrameT: ...
    @final
    def cummax(
        self,
        axis: AxisInt | _NoDefaultDoNotUse = ...,
        numeric_only: bool = ...,
        **kwargs,
    ) -> NDFrameT: ...
    @final
    def shift(
        self,
        periods: int | Sequence[int] = 1,
        freq: Frequency | None = ...,
        axis: Axis | _NoDefaultDoNotUse = 0,
        fill_value=...,
        suffix: str | None = ...,
    ) -> NDFrameT: ...
    @final
    def diff(
        self, periods: int = 1, axis: AxisInt | _NoDefaultDoNotUse = 0
    ) -> NDFrameT: ...
    @final
    def pct_change(
        self,
        periods: int = ...,
        fill_method: Literal["bfill", "ffill"] | None | _NoDefaultDoNotUse = ...,
        limit: int | None | _NoDefaultDoNotUse = ...,
        freq=...,
        axis: Axis | _NoDefaultDoNotUse = ...,
    ) -> NDFrameT: ...
    @final
    def head(self, n: int = ...) -> NDFrameT: ...
    @final
    def tail(self, n: int = ...) -> NDFrameT: ...
    @final
    def sample(
        self,
        n: int | None = None,
        frac: float | None = None,
        replace: bool = False,
        weights: Sequence | Series | None = ...,
        random_state: RandomState | None = ...,
    ) -> NDFrameT: ...
# TypeVar for the groupby object a GroupByPlot wraps.
_GroupByT = TypeVar("_GroupByT", bound=GroupBy)

# GroupByPlot does not really inherit from PlotAccessor but it delegates
# to it using __call__ and __getattr__. We lie here to avoid repeating the
# whole stub of PlotAccessor
@final
class GroupByPlot(PlotAccessor, Generic[_GroupByT]):
    def __init__(self, groupby: _GroupByT) -> None: ...
    # The following methods are inherited from the fake parent class PlotAccessor
    # def __call__(self, *args, **kwargs): ...
    # def __getattr__(self, name: str): ...
# Common base stub for all groupby objects: sizing, group introspection,
# pipe, iteration, and column selection via __getitem__.
class BaseGroupBy(SelectionMixin[NDFrameT], GroupByIndexingMixin):
    @final
    def __len__(self) -> int: ...
    @final
    def __repr__(self) -> str: ...  # noqa: PYI029 __repr__ here is final
    @final
    @property
    def groups(self) -> dict[Hashable, Index]: ...
    @final
    @property
    def ngroups(self) -> int: ...
    @final
    @property
    def indices(self) -> dict[Hashable, Index | npt.NDArray[np.int_] | list[int]]: ...
    @overload
    def pipe(
        self,
        func: Callable[Concatenate[Self, P], T],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> T: ...
    @overload
    def pipe(
        self,
        func: tuple[Callable[..., T], str],
        *args: Any,
        **kwargs: Any,
    ) -> T: ...
    @final
    def get_group(self, name) -> NDFrameT: ...
    @final
    def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: ...
    # Selection: on a DataFrame groupby, a scalar key yields a SeriesGroupBy
    # and an iterable of keys a DataFrameGroupBy; on a Series groupby,
    # list-like selection keeps a SeriesGroupBy and a scalar returns S1.
    @overload
    def __getitem__(self: BaseGroupBy[DataFrame], key: Scalar) -> generic.SeriesGroupBy: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __getitem__(
        self: BaseGroupBy[DataFrame], key: Iterable[Hashable]
    ) -> generic.DataFrameGroupBy: ...
    @overload
    def __getitem__(
        self: BaseGroupBy[Series[S1]],
        idx: list[str] | Index | Series[S1] | MaskType | tuple[Hashable | slice, ...],
    ) -> generic.SeriesGroupBy: ...
    @overload
    def __getitem__(self: BaseGroupBy[Series[S1]], idx: Scalar) -> S1: ...

View File

@ -0,0 +1,74 @@
from collections.abc import (
Hashable,
Iterator,
)
from typing import (
final,
overload,
)
import numpy as np
from pandas import (
DataFrame,
Index,
Series,
)
from pandas.core.resample import TimeGrouper
from typing_extensions import Self
from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
ArrayLike,
Axis,
Frequency,
Incomplete,
KeysArgType,
Level,
ListLikeHashable,
npt,
)
from pandas.util._decorators import cache_readonly
# Stub for pd.Grouper: a grouping specification passed to groupby.
# __new__ is overloaded so that passing `freq` constructs a TimeGrouper.
class Grouper:
    key: KeysArgType | None
    level: Level | ListLikeHashable[Level] | None
    freq: Frequency | None
    axis: Axis
    sort: bool
    dropna: bool
    binner: Incomplete
    @overload
    def __new__(
        cls,
        key: KeysArgType | None = ...,
        level: Level | ListLikeHashable[Level] | None = ...,
        axis: Axis | _NoDefaultDoNotUse = ...,
        sort: bool = ...,
        dropna: bool = ...,
    ) -> Self: ...
    @overload
    def __new__(cls, *args, freq: Frequency, **kwargs) -> TimeGrouper: ...
    @final
    def __repr__(self) -> str: ...  # noqa: PYI029 __repr__ here is final
# Internal stub: one resolved grouping vector (codes, labels, index views)
# backing a groupby operation. Most accessors are cached properties.
@final
class Grouping:
    level: Level | None
    obj: DataFrame | Series | None
    in_axis: bool
    grouping_vector: Incomplete
    def __iter__(self) -> Iterator[Hashable]: ...
    @cache_readonly
    def name(self) -> Hashable: ...
    @cache_readonly
    def ngroups(self) -> int: ...
    @cache_readonly
    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: ...
    @property
    def codes(self) -> npt.NDArray[np.signedinteger]: ...
    @cache_readonly
    def group_arraylike(self) -> ArrayLike: ...
    @cache_readonly
    def result_index(self) -> Index: ...
    @cache_readonly
    def group_index(self) -> Index: ...

View File

@ -0,0 +1,32 @@
from typing import (
Any,
Generic,
Literal,
TypeVar,
)
from pandas import (
DataFrame,
Series,
)
from pandas.core.groupby import groupby
from pandas._typing import PositionalIndexer
# TypeVar for the groupby object wrapped by the selectors below.
_GroupByT = TypeVar("_GroupByT", bound=groupby.GroupBy[Any])

# Marker mixin enabling positional indexing helpers on groupby objects.
class GroupByIndexingMixin: ...

# Positional row selection within each group (backs GroupBy._positional_selector).
class GroupByPositionalSelector:
    groupby_object: groupby.GroupBy
    def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series: ...

# Callable/indexable selector returned by the GroupBy.nth property.
class GroupByNthSelector(Generic[_GroupByT]):
    groupby_object: _GroupByT
    def __call__(
        self,
        n: PositionalIndexer | tuple,
        dropna: Literal["any", "all", None] = ...,
    ) -> DataFrame | Series: ...
    def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series: ...

View File

@ -0,0 +1,103 @@
from collections.abc import (
Callable,
Hashable,
Iterator,
)
from typing import (
Generic,
final,
)
import numpy as np
from pandas import (
Index,
Series,
)
from pandas.core.groupby import grouper
from pandas._typing import (
ArrayLike,
AxisInt,
Incomplete,
NDFrameT,
Shape,
T,
npt,
)
from pandas.util._decorators import cache_readonly
# Internal stub: the engine object mapping rows to groups (codes, indices,
# result index) and driving per-group aggregation/apply.
class BaseGrouper:
    axis: Index
    dropna: bool
    @property
    def groupings(self) -> list[grouper.Grouping]: ...
    @property
    def shape(self) -> Shape: ...
    def __iter__(self) -> Iterator: ...
    @property
    def nkeys(self) -> int: ...
    def get_iterator(
        self, data: NDFrameT, axis: AxisInt = ...
    ) -> Iterator[tuple[Hashable, NDFrameT]]: ...
    @final
    @cache_readonly
    def group_keys_seq(self): ...
    @cache_readonly
    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: ...
    @final
    def result_ilocs(self) -> npt.NDArray[np.intp]: ...
    @final
    @property
    def codes(self) -> list[npt.NDArray[np.signedinteger]]: ...
    @property
    def levels(self) -> list[Index]: ...
    @property
    def names(self) -> list: ...
    @final
    def size(self) -> Series: ...
    @cache_readonly
    def groups(self) -> dict[Hashable, np.ndarray]: ...
    @final
    @cache_readonly
    def is_monotonic(self) -> bool: ...
    @final
    @cache_readonly
    def has_dropped_na(self) -> bool: ...
    @cache_readonly
    def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: ...
    @cache_readonly
    def codes_info(self) -> npt.NDArray[np.intp]: ...
    @final
    @cache_readonly
    def ngroups(self) -> int: ...
    @property
    def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]: ...
    @cache_readonly
    def result_index(self) -> Index: ...
    @final
    def get_group_levels(self) -> list[ArrayLike]: ...
    @final
    def agg_series(
        self,
        obj: Series,
        func: Callable[[Series], object],
        preserve_dtype: bool = ...,
    ) -> ArrayLike: ...
    @final
    def apply_groupwise(
        self, f: Callable[[NDFrameT], T], data: NDFrameT, axis: AxisInt = ...
    ) -> tuple[list[T], bool]: ...
# Internal stub: grouper defined by bin edges (used by resampling).
class BinGrouper(BaseGrouper):
    bins: npt.NDArray[np.int64]
    binlabels: Index
    indexer: npt.NDArray[np.intp]
    @cache_readonly
    def indices(self) -> dict[Incomplete, list[int]]: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]

# Internal stub: iterator that yields one sub-object per group.
class DataSplitter(Generic[NDFrameT]):
    data: NDFrameT
    labels: npt.NDArray[np.intp]
    ngroups: int
    axis: AxisInt
    def __iter__(self) -> Iterator[NDFrameT]: ...

View File

@ -0,0 +1,44 @@
import numpy as np
def check_array_indexer(arrayArrayLike, indexer): ...
class BaseIndexer:
def __init__(
self,
index_array: np.ndarray | None = ...,
window_size: int = ...,
**kwargs,
) -> None: ...
def get_window_bounds(
self,
num_values: int = ...,
min_periods: int | None = ...,
center: bool | None = ...,
closed: str | None = ...,
) -> tuple[np.ndarray, np.ndarray]: ...
# Indexer for windows defined by an offset (`offset`) relative to an index
# (`index`) rather than a fixed number of observations.
class VariableOffsetWindowIndexer(BaseIndexer):
    def __init__(
        self,
        index_array: np.ndarray | None = ...,
        window_size: int = ...,
        index=...,
        offset=...,
        **kwargs,
    ) -> None: ...
    def get_window_bounds(
        self,
        num_values: int = ...,
        min_periods: int | None = ...,
        center: bool | None = ...,
        closed: str | None = ...,
    ) -> tuple[np.ndarray, np.ndarray]: ...
# Indexer producing fixed-length windows that extend forward from each row
# (instead of the default backward-looking windows).
class FixedForwardWindowIndexer(BaseIndexer):
    def get_window_bounds(
        self,
        num_values: int = ...,
        min_periods: int | None = ...,
        center: bool | None = ...,
        closed: str | None = ...,
    ) -> tuple[np.ndarray, np.ndarray]: ...

View File

@ -0,0 +1,428 @@
import datetime as dt
from datetime import (
timedelta,
tzinfo as _tzinfo,
)
from typing import (
Generic,
Literal,
TypeVar,
)
import numpy as np
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Timedelta,
TimedeltaIndex,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import (
DatetimeArray,
PeriodArray,
)
from pandas.core.base import NoNewAttributesMixin
from pandas.core.frame import DataFrame
from pandas.core.series import (
PeriodSeries,
Series,
TimedeltaSeries,
TimestampSeries,
)
from pandas._libs.tslibs import BaseOffset
from pandas._libs.tslibs.offsets import DateOffset
from pandas._typing import (
TimeAmbiguous,
TimeNonexistent,
TimestampConvention,
TimeUnit,
TimeZones,
np_1darray,
np_ndarray_bool,
)
class Properties(PandasDelegate, NoNewAttributesMixin): ...
_DTFieldOpsReturnType = TypeVar("_DTFieldOpsReturnType", bound=Series[int] | Index[int])
class _DayLikeFieldOps(Generic[_DTFieldOpsReturnType]):
@property
def year(self) -> _DTFieldOpsReturnType: ...
@property
def month(self) -> _DTFieldOpsReturnType: ...
@property
def day(self) -> _DTFieldOpsReturnType: ...
@property
def hour(self) -> _DTFieldOpsReturnType: ...
@property
def minute(self) -> _DTFieldOpsReturnType: ...
@property
def second(self) -> _DTFieldOpsReturnType: ...
@property
def weekday(self) -> _DTFieldOpsReturnType: ...
@property
def dayofweek(self) -> _DTFieldOpsReturnType: ...
@property
def day_of_week(self) -> _DTFieldOpsReturnType: ...
@property
def dayofyear(self) -> _DTFieldOpsReturnType: ...
@property
def day_of_year(self) -> _DTFieldOpsReturnType: ...
@property
def quarter(self) -> _DTFieldOpsReturnType: ...
@property
def days_in_month(self) -> _DTFieldOpsReturnType: ...
@property
def daysinmonth(self) -> _DTFieldOpsReturnType: ...
class _MiniSeconds(Generic[_DTFieldOpsReturnType]):
@property
def microsecond(self) -> _DTFieldOpsReturnType: ...
@property
def nanosecond(self) -> _DTFieldOpsReturnType: ...
class _DatetimeFieldOps(
_DayLikeFieldOps[_DTFieldOpsReturnType], _MiniSeconds[_DTFieldOpsReturnType]
): ...
_DTBoolOpsReturnType = TypeVar(
"_DTBoolOpsReturnType", bound=Series[bool] | np_1darray[np.bool]
)
class _IsLeapYearProperty(Generic[_DTBoolOpsReturnType]):
@property
def is_leap_year(self) -> _DTBoolOpsReturnType: ...
class _DatetimeBoolOps(
_IsLeapYearProperty[_DTBoolOpsReturnType], Generic[_DTBoolOpsReturnType]
):
@property
def is_month_start(self) -> _DTBoolOpsReturnType: ...
@property
def is_month_end(self) -> _DTBoolOpsReturnType: ...
@property
def is_quarter_start(self) -> _DTBoolOpsReturnType: ...
@property
def is_quarter_end(self) -> _DTBoolOpsReturnType: ...
@property
def is_year_start(self) -> _DTBoolOpsReturnType: ...
@property
def is_year_end(self) -> _DTBoolOpsReturnType: ...
_DTFreqReturnType = TypeVar("_DTFreqReturnType", bound=str | BaseOffset)
class _FreqProperty(Generic[_DTFreqReturnType]):
@property
def freq(self) -> _DTFreqReturnType | None: ...
class _TZProperty:
@property
def tz(self) -> _tzinfo | None: ...
class _DatetimeObjectOps(
_FreqProperty[_DTFreqReturnType], _TZProperty, Generic[_DTFreqReturnType]
): ...
_DTOtherOpsDateReturnType = TypeVar(
"_DTOtherOpsDateReturnType", bound=Series[dt.date] | np_1darray[np.object_]
)
_DTOtherOpsTimeReturnType = TypeVar(
"_DTOtherOpsTimeReturnType", bound=Series[dt.time] | np_1darray[np.object_]
)
class _DatetimeOtherOps(Generic[_DTOtherOpsDateReturnType, _DTOtherOpsTimeReturnType]):
@property
def date(self) -> _DTOtherOpsDateReturnType: ...
@property
def time(self) -> _DTOtherOpsTimeReturnType: ...
@property
def timetz(self) -> _DTOtherOpsTimeReturnType: ...
class _DatetimeLikeOps(
_DatetimeFieldOps[_DTFieldOpsReturnType],
_DatetimeObjectOps[_DTFreqReturnType],
_DatetimeBoolOps[_DTBoolOpsReturnType],
_DatetimeOtherOps[_DTOtherOpsDateReturnType, _DTOtherOpsTimeReturnType],
Generic[
_DTFieldOpsReturnType,
_DTBoolOpsReturnType,
_DTOtherOpsDateReturnType,
_DTOtherOpsTimeReturnType,
_DTFreqReturnType,
],
): ...
# Ideally, the rounding methods would return TimestampSeries when `Series.dt.method`
# is invoked, but because of how Series.dt is hooked in and that we may not know the
# type of the series, we don't know which kind of series was passed
# in to the dt accessor
_DTTimestampTimedeltaReturnType = TypeVar(
"_DTTimestampTimedeltaReturnType",
bound=Series | TimestampSeries | TimedeltaSeries | DatetimeIndex | TimedeltaIndex,
)
# Frequency-rounding methods shared by datetime/timedelta Series accessors and
# indexes; the TypeVar keeps the return type matched to the invoking object.
class _DatetimeRoundingMethods(Generic[_DTTimestampTimedeltaReturnType]):
    # Round to the nearest multiple of `freq`; `ambiguous`/`nonexistent`
    # control handling of times affected by DST transitions.
    def round(
        self,
        freq: str | BaseOffset | None,
        ambiguous: Literal["raise", "infer", "NaT"] | bool | np_ndarray_bool = ...,
        nonexistent: (
            Literal["shift_forward", "shift_backward", "NaT", "raise"]
            | timedelta
            | Timedelta
        ) = ...,
    ) -> _DTTimestampTimedeltaReturnType: ...
    # Round down to a multiple of `freq`.
    def floor(
        self,
        freq: str | BaseOffset | None,
        ambiguous: Literal["raise", "infer", "NaT"] | bool | np_ndarray_bool = ...,
        nonexistent: (
            Literal["shift_forward", "shift_backward", "NaT", "raise"]
            | timedelta
            | Timedelta
        ) = ...,
    ) -> _DTTimestampTimedeltaReturnType: ...
    # Round up to a multiple of `freq`.
    def ceil(
        self,
        freq: str | BaseOffset | None,
        ambiguous: Literal["raise", "infer", "NaT"] | bool | np_ndarray_bool = ...,
        nonexistent: (
            Literal["shift_forward", "shift_backward", "NaT", "raise"]
            | timedelta
            | Timedelta
        ) = ...,
    ) -> _DTTimestampTimedeltaReturnType: ...
_DTNormalizeReturnType = TypeVar(
"_DTNormalizeReturnType", TimestampSeries, DatetimeIndex
)
_DTStrKindReturnType = TypeVar("_DTStrKindReturnType", bound=Series[str] | Index)
_DTToPeriodReturnType = TypeVar(
"_DTToPeriodReturnType", bound=PeriodSeries | PeriodIndex
)
class _DatetimeLikeNoTZMethods(
_DatetimeRoundingMethods[_DTTimestampTimedeltaReturnType],
Generic[
_DTTimestampTimedeltaReturnType,
_DTNormalizeReturnType,
_DTStrKindReturnType,
_DTToPeriodReturnType,
],
):
def to_period(
self, freq: str | BaseOffset | None = ...
) -> _DTToPeriodReturnType: ...
def tz_localize(
self,
tz: TimeZones,
ambiguous: TimeAmbiguous = ...,
nonexistent: TimeNonexistent = ...,
) -> _DTNormalizeReturnType: ...
def tz_convert(self, tz: TimeZones) -> _DTNormalizeReturnType: ...
def normalize(self) -> _DTNormalizeReturnType: ...
def strftime(self, date_format: str) -> _DTStrKindReturnType: ...
def month_name(self, locale: str | None = ...) -> _DTStrKindReturnType: ...
def day_name(self, locale: str | None = ...) -> _DTStrKindReturnType: ...
class _DatetimeNoTZProperties(
_DatetimeLikeOps[
_DTFieldOpsReturnType,
_DTBoolOpsReturnType,
_DTOtherOpsDateReturnType,
_DTOtherOpsTimeReturnType,
_DTFreqReturnType,
],
_DatetimeLikeNoTZMethods[
_DTTimestampTimedeltaReturnType,
_DTNormalizeReturnType,
_DTStrKindReturnType,
_DTToPeriodReturnType,
],
Generic[
_DTFieldOpsReturnType,
_DTBoolOpsReturnType,
_DTTimestampTimedeltaReturnType,
_DTOtherOpsDateReturnType,
_DTOtherOpsTimeReturnType,
_DTFreqReturnType,
_DTNormalizeReturnType,
_DTStrKindReturnType,
_DTToPeriodReturnType,
],
): ...
class DatetimeProperties(
Properties,
_DatetimeNoTZProperties[
_DTFieldOpsReturnType,
_DTBoolOpsReturnType,
_DTTimestampTimedeltaReturnType,
_DTOtherOpsDateReturnType,
_DTOtherOpsTimeReturnType,
_DTFreqReturnType,
_DTNormalizeReturnType,
_DTStrKindReturnType,
_DTToPeriodReturnType,
],
Generic[
_DTFieldOpsReturnType,
_DTBoolOpsReturnType,
_DTTimestampTimedeltaReturnType,
_DTOtherOpsDateReturnType,
_DTOtherOpsTimeReturnType,
_DTFreqReturnType,
_DTNormalizeReturnType,
_DTStrKindReturnType,
_DTToPeriodReturnType,
],
):
def to_pydatetime(self) -> np_1darray[np.object_]: ...
def isocalendar(self) -> DataFrame: ...
@property
def unit(self) -> TimeUnit: ...
def as_unit(self, unit: TimeUnit) -> _DTTimestampTimedeltaReturnType: ...
_TDNoRoundingMethodReturnType = TypeVar(
"_TDNoRoundingMethodReturnType", bound=Series[int] | Index
)
_TDTotalSecondsReturnType = TypeVar(
"_TDTotalSecondsReturnType", bound=Series[float] | Index
)
class _TimedeltaPropertiesNoRounding(
Generic[_TDNoRoundingMethodReturnType, _TDTotalSecondsReturnType]
):
def to_pytimedelta(self) -> np_1darray[np.object_]: ...
@property
def components(self) -> DataFrame: ...
@property
def days(self) -> _TDNoRoundingMethodReturnType: ...
@property
def seconds(self) -> _TDNoRoundingMethodReturnType: ...
@property
def microseconds(self) -> _TDNoRoundingMethodReturnType: ...
@property
def nanoseconds(self) -> _TDNoRoundingMethodReturnType: ...
def total_seconds(self) -> _TDTotalSecondsReturnType: ...
# `.dt` accessor for timedelta Series: component fields (days/seconds/...)
# plus the rounding methods, all returning Series types.
class TimedeltaProperties(
    Properties,
    _TimedeltaPropertiesNoRounding[Series[int], Series[float]],
    _DatetimeRoundingMethods[TimedeltaSeries],
):
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit) -> TimedeltaSeries: ...
_PeriodDTReturnTypes = TypeVar(
"_PeriodDTReturnTypes", bound=TimestampSeries | DatetimeIndex
)
_PeriodIntReturnTypes = TypeVar("_PeriodIntReturnTypes", bound=Series[int] | Index[int])
_PeriodStrReturnTypes = TypeVar("_PeriodStrReturnTypes", bound=Series[str] | Index)
_PeriodDTAReturnTypes = TypeVar(
"_PeriodDTAReturnTypes", bound=DatetimeArray | DatetimeIndex
)
_PeriodPAReturnTypes = TypeVar("_PeriodPAReturnTypes", bound=PeriodArray | PeriodIndex)
class _PeriodProperties(
Generic[
_PeriodDTReturnTypes,
_PeriodIntReturnTypes,
_PeriodStrReturnTypes,
_PeriodDTAReturnTypes,
_PeriodPAReturnTypes,
]
):
@property
def start_time(self) -> _PeriodDTReturnTypes: ...
@property
def end_time(self) -> _PeriodDTReturnTypes: ...
@property
def qyear(self) -> _PeriodIntReturnTypes: ...
def strftime(self, date_format: str) -> _PeriodStrReturnTypes: ...
def to_timestamp(
self,
freq: str | DateOffset | None = ...,
how: TimestampConvention = ...,
) -> _PeriodDTAReturnTypes: ...
def asfreq(
self,
freq: str | DateOffset | None = ...,
how: Literal["E", "END", "FINISH", "S", "START", "BEGIN"] = ...,
) -> _PeriodPAReturnTypes: ...
class PeriodIndexFieldOps(
_DayLikeFieldOps[Index[int]],
_PeriodProperties[DatetimeIndex, Index[int], Index, DatetimeIndex, PeriodIndex],
): ...
class PeriodProperties(
Properties,
_PeriodProperties[
TimestampSeries, Series[int], Series[str], DatetimeArray, PeriodArray
],
_DatetimeFieldOps[Series[int]],
_IsLeapYearProperty,
_FreqProperty[BaseOffset],
): ...
class CombinedDatetimelikeProperties(
DatetimeProperties[
Series[int],
Series[bool],
Series,
Series[dt.date],
Series[dt.time],
str,
TimestampSeries,
Series[str],
PeriodSeries,
],
_TimedeltaPropertiesNoRounding[Series[int], Series[float]],
_PeriodProperties,
): ...
class TimestampProperties(
DatetimeProperties[
Series[int],
Series[bool],
TimestampSeries,
Series[dt.date],
Series[dt.time],
str,
TimestampSeries,
Series[str],
PeriodSeries,
]
): ...
# Datetime field/method properties as mixed into DatetimeIndex itself:
# the generic parameters specialize returns to Index/ndarray types.
class DatetimeIndexProperties(
    Properties,
    _DatetimeNoTZProperties[
        Index[int],
        np_1darray[np.bool],
        DatetimeIndex,
        np_1darray[np.object_],
        np_1darray[np.object_],
        BaseOffset,
        DatetimeIndex,
        Index,
        PeriodIndex,
    ],
    _TZProperty,
):
    @property
    def is_normalized(self) -> bool: ...
    @property
    def tzinfo(self) -> _tzinfo | None: ...
    # Converts to a 1-D object array of stdlib datetime.datetime values.
    def to_pydatetime(self) -> np_1darray[np.object_]: ...
    def std(
        self, axis: int | None = ..., ddof: int = ..., skipna: bool = ...
    ) -> Timedelta: ...
# Timedelta component/rounding properties as mixed into TimedeltaIndex:
# component fields return Index objects rather than Series.
class TimedeltaIndexProperties(
    Properties,
    _TimedeltaPropertiesNoRounding[Index, Index],
    _DatetimeRoundingMethods[TimedeltaIndex],
): ...

View File

@ -0,0 +1,8 @@
from pandas.core.indexes.base import Index as Index
from pandas.core.indexes.category import CategoricalIndex as CategoricalIndex
from pandas.core.indexes.datetimes import DatetimeIndex as DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex as IntervalIndex
from pandas.core.indexes.multi import MultiIndex as MultiIndex
from pandas.core.indexes.period import PeriodIndex as PeriodIndex
from pandas.core.indexes.range import RangeIndex as RangeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex as TimedeltaIndex

View File

@ -0,0 +1,535 @@
from builtins import str as _str
from collections.abc import (
Callable,
Hashable,
Iterable,
Iterator,
Sequence,
)
from datetime import (
datetime,
timedelta,
)
from typing import (
Any,
ClassVar,
Generic,
Literal,
final,
overload,
type_check_only,
)
import numpy as np
from pandas import (
DataFrame,
DatetimeIndex,
Interval,
IntervalIndex,
MultiIndex,
Period,
PeriodDtype,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin
from pandas.core.strings.accessor import StringMethods
from typing_extensions import (
Never,
Self,
)
from pandas._libs.interval import _OrderableT
from pandas._typing import (
C2,
S1,
AnyAll,
ArrayLike,
AxesData,
DropKeep,
Dtype,
DtypeArg,
DTypeLike,
DtypeObj,
GenericT,
GenericT_co,
HashableT,
IgnoreRaise,
Label,
Level,
MaskType,
NaPosition,
ReindexMethod,
Scalar,
SequenceNotStr,
SliceType,
SupportsDType,
TimedeltaDtypeArg,
TimestampDtypeArg,
np_1darray,
np_ndarray_anyint,
np_ndarray_complex,
np_ndarray_float,
type_t,
)
class InvalidIndexError(Exception): ...
class Index(IndexOpsMixin[S1]):
__hash__: ClassVar[None] # type: ignore[assignment]
# overloads with additional dtypes
@overload
def __new__( # pyright: ignore[reportOverlappingOverload]
cls,
data: Sequence[int | np.integer] | IndexOpsMixin[int] | np_ndarray_anyint,
*,
dtype: Literal["int"] | type_t[int | np.integer] = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Index[int]: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: Literal["int"] | type_t[int | np.integer],
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Index[int]: ...
@overload
def __new__(
cls,
data: Sequence[float | np.floating] | IndexOpsMixin[float] | np_ndarray_float,
*,
dtype: Literal["float"] | type_t[float | np.floating] = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Index[float]: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: Literal["float"] | type_t[float | np.floating],
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Index[float]: ...
@overload
def __new__(
cls,
data: (
Sequence[complex | np.complexfloating]
| IndexOpsMixin[complex]
| np_ndarray_complex
),
*,
dtype: Literal["complex"] | type_t[complex | np.complexfloating] = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Index[complex]: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: Literal["complex"] | type_t[complex | np.complexfloating],
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Index[complex]: ...
# special overloads with dedicated Index-subclasses
@overload
def __new__(
cls,
data: Sequence[np.datetime64 | datetime] | IndexOpsMixin[datetime],
*,
dtype: TimestampDtypeArg = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> DatetimeIndex: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: TimestampDtypeArg,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> DatetimeIndex: ...
@overload
def __new__(
cls,
data: Sequence[Period] | IndexOpsMixin[Period],
*,
dtype: PeriodDtype = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> PeriodIndex: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: PeriodDtype,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> PeriodIndex: ...
@overload
def __new__(
cls,
data: Sequence[np.timedelta64 | timedelta] | IndexOpsMixin[timedelta],
*,
dtype: TimedeltaDtypeArg = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> TimedeltaIndex: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: TimedeltaDtypeArg,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> TimedeltaIndex: ...
@overload
def __new__(
cls,
data: Sequence[Interval[_OrderableT]] | IndexOpsMixin[Interval[_OrderableT]],
*,
dtype: Literal["Interval"] = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> IntervalIndex[Interval[_OrderableT]]: ...
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: Literal["Interval"],
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> IntervalIndex[Interval[Any]]: ...
# generic overloads
@overload
def __new__(
cls,
data: Iterable[S1] | IndexOpsMixin[S1],
*,
dtype: type[S1] = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Self: ...
@overload
def __new__(
cls,
data: AxesData = ...,
*,
dtype: type[S1],
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Self: ...
# fallback overload
@overload
def __new__(
cls,
data: AxesData,
*,
dtype: Dtype = ...,
copy: bool = ...,
name: Hashable = ...,
tupleize_cols: bool = ...,
) -> Self: ...
@property
def str(
self,
) -> StringMethods[
Self,
MultiIndex,
np_1darray[np.bool],
Index[list[_str]],
Index[int],
Index[bytes],
Index[_str],
Index,
]: ...
@final
def is_(self, other) -> bool: ...
def __len__(self) -> int: ...
def __array__(
self, dtype: _str | np.dtype = ..., copy: bool | None = ...
) -> np_1darray: ...
def __array_wrap__(self, result, context=...): ...
@property
def dtype(self) -> DtypeObj: ...
@final
def ravel(self, order: _str = ...): ...
def view(self, cls=...): ...
def astype(self, dtype: DtypeArg, copy: bool = True) -> Index: ...
def take(
self,
indices,
axis: int = 0,
allow_fill: bool = True,
fill_value: Scalar | None = None,
**kwargs,
): ...
def repeat(self, repeats, axis=...): ...
def copy(self, name: Hashable = ..., deep: bool = False) -> Self: ...
@final
def __copy__(self, **kwargs): ...
@final
def __deepcopy__(self, memo=...): ...
def format(
self, name: bool = ..., formatter: Callable | None = ..., na_rep: _str = ...
) -> list[_str]: ...
def to_flat_index(self): ...
def to_series(self, index=..., name: Hashable = ...) -> Series: ...
def to_frame(self, index: bool = True, name=...) -> DataFrame: ...
@property
def name(self) -> Hashable | None: ...
@name.setter
def name(self, value: Hashable) -> None: ...
@property
def names(self) -> list[Hashable | None]: ...
@names.setter
def names(self, names: SequenceNotStr[Hashable | None]) -> None: ...
def set_names(self, names, *, level=..., inplace: bool = ...): ...
@overload
def rename(self, name, *, inplace: Literal[False] = False) -> Self: ...
@overload
def rename(self, name, *, inplace: Literal[True]) -> None: ...
@property
def nlevels(self) -> int: ...
def get_level_values(self, level: int | _str) -> Index: ...
def droplevel(self, level: Level | list[Level] = 0): ...
@property
def is_monotonic_increasing(self) -> bool: ...
@property
def is_monotonic_decreasing(self) -> bool: ...
@property
def is_unique(self) -> bool: ...
@property
def has_duplicates(self) -> bool: ...
@property
def inferred_type(self) -> _str: ...
def __reduce__(self): ...
@property
def hasnans(self) -> bool: ...
@final
def isna(self): ...
isnull = ...
@final
def notna(self): ...
notnull = ...
def fillna(self, value=...): ...
def dropna(self, how: AnyAll = "any") -> Self: ...
def unique(self, level=...) -> Self: ...
def drop_duplicates(self, *, keep: DropKeep = ...) -> Self: ...
def duplicated(self, keep: DropKeep = "first") -> np_1darray[np.bool]: ...
def __and__(self, other: Never) -> Never: ...
def __rand__(self, other: Never) -> Never: ...
def __or__(self, other: Never) -> Never: ...
def __ror__(self, other: Never) -> Never: ...
def __xor__(self, other: Never) -> Never: ...
def __rxor__(self, other: Never) -> Never: ...
def __neg__(self) -> Self: ...
@final
def __nonzero__(self) -> None: ...
__bool__ = ...
def union(
self, other: list[HashableT] | Self, sort: bool | None = None
) -> Index: ...
def intersection(
self, other: list[S1] | Self, sort: bool | None = False
) -> Self: ...
def difference(self, other: list | Self, sort: bool | None = None) -> Self: ...
def symmetric_difference(
self,
other: list[S1] | Self,
result_name: Hashable = ...,
sort: bool | None = None,
) -> Self: ...
def get_loc(self, key: Label) -> int | slice | np_1darray[np.bool]: ...
def get_indexer(
self, target, method: ReindexMethod | None = ..., limit=..., tolerance=...
): ...
def reindex(
self,
target,
method: ReindexMethod | None = ...,
level=...,
limit=...,
tolerance=...,
): ...
def join(
self,
other,
*,
how: _str = ...,
level=...,
return_indexers: bool = ...,
sort: bool = ...,
): ...
@property
def values(self) -> np_1darray: ...
@property
def array(self) -> ExtensionArray: ...
def memory_usage(self, deep: bool = False): ...
def where(self, cond, other: Scalar | ArrayLike | None = None): ...
def __contains__(self, key) -> bool: ...
@final
def __setitem__(self, key, value) -> None: ...
@overload
def __getitem__(
self,
idx: slice | np_ndarray_anyint | Sequence[int] | Index | MaskType,
) -> Self: ...
@overload
def __getitem__(self, idx: int | tuple[np_ndarray_anyint, ...]) -> S1: ...
@overload
def append(
self: Index[C2], other: Index[C2] | Sequence[Index[C2]]
) -> Index[C2]: ...
@overload
def append(self, other: Index | Sequence[Index]) -> Index: ...
def putmask(self, mask, value): ...
def equals(self, other) -> bool: ...
@final
def identical(self, other) -> bool: ...
@final
def asof(self, label): ...
def asof_locs(self, where, mask): ...
def sort_values(
self,
*,
return_indexer: bool = ...,
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable[[Index], Index] | None = None,
): ...
@final
def sort(self, *args, **kwargs) -> None: ...
def argsort(self, *args, **kwargs): ...
def get_indexer_non_unique(self, target): ...
@final
def get_indexer_for(self, target, **kwargs): ...
@final
def groupby(self, values) -> dict[Hashable, np.ndarray]: ...
def map(self, mapper, na_action=...) -> Index: ...
def isin(self, values, level=...) -> np_1darray[np.bool]: ...
def slice_indexer(
self,
start: Label | None = None,
end: Label | None = None,
step: int | None = None,
): ...
def get_slice_bound(self, label, side): ...
def slice_locs(
self, start: SliceType = None, end: SliceType = None, step: int | None = None
): ...
def delete(self, loc) -> Self: ...
def insert(self, loc, item) -> Self: ...
def drop(self, labels, errors: IgnoreRaise = "raise") -> Self: ...
@property
def shape(self) -> tuple[int, ...]: ...
# Extra methods from old stubs
def __eq__(self, other: object) -> np_1darray[np.bool]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
def __iter__(self) -> Iterator[S1]: ...
def __ne__(self, other: object) -> np_1darray[np.bool]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
def __le__(self, other: Self | S1) -> np_1darray[np.bool]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
def __ge__(self, other: Self | S1) -> np_1darray[np.bool]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
def __lt__(self, other: Self | S1) -> np_1darray[np.bool]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
def __gt__(self, other: Self | S1) -> np_1darray[np.bool]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
# overwrite inherited methods from OpsMixin
@overload
def __mul__(
self: Index[int] | Index[float], other: timedelta
) -> TimedeltaIndex: ...
@overload
def __mul__(self, other: Any) -> Self: ...
def __floordiv__(
self,
other: (
float
| IndexOpsMixin[int]
| IndexOpsMixin[float]
| Sequence[int]
| Sequence[float]
),
) -> Self: ...
def __rfloordiv__(
self,
other: (
float
| IndexOpsMixin[int]
| IndexOpsMixin[float]
| Sequence[int]
| Sequence[float]
),
) -> Self: ...
def __truediv__(
self,
other: (
float
| IndexOpsMixin[int]
| IndexOpsMixin[float]
| Sequence[int]
| Sequence[float]
),
) -> Self: ...
def __rtruediv__(
self,
other: (
float
| IndexOpsMixin[int]
| IndexOpsMixin[float]
| Sequence[int]
| Sequence[float]
),
) -> Self: ...
def infer_objects(self, copy: bool = True) -> Self: ...
@type_check_only
class _IndexSubclassBase(Index[S1], Generic[S1, GenericT_co]):
@overload
def to_numpy( # pyrefly: ignore
self,
dtype: None = None,
copy: bool = False,
na_value: Scalar = ...,
**kwargs,
) -> np_1darray[GenericT_co]: ...
@overload
def to_numpy(
self,
dtype: np.dtype[GenericT] | SupportsDType[GenericT] | type[GenericT],
copy: bool = False,
na_value: Scalar = ...,
**kwargs,
) -> np_1darray[GenericT]: ...
@overload
def to_numpy(
self,
dtype: DTypeLike,
copy: bool = False,
na_value: Scalar = ...,
**kwargs,
) -> np_1darray: ...

View File

@ -0,0 +1,53 @@
from collections.abc import (
Hashable,
Iterable,
)
from typing import (
final,
)
import numpy as np
from pandas.core import accessor
from pandas.core.indexes.base import Index
from pandas.core.indexes.extension import ExtensionIndex
from typing_extensions import Self
from pandas._typing import (
S1,
DtypeArg,
)
# Index of categorical values: wraps integer `codes` referencing a shared
# `categories` Index, delegating Categorical attributes via PandasDelegate.
class CategoricalIndex(ExtensionIndex[S1], accessor.PandasDelegate):
    # Integer position of each element within `categories`.
    codes: np.ndarray = ...
    categories: Index = ...
    def __new__(
        cls,
        data: Iterable[S1] = ...,
        categories=...,
        ordered=...,
        dtype=...,
        copy: bool = ...,
        name: Hashable = ...,
    ) -> Self: ...
    def equals(self, other): ...
    @property
    def inferred_type(self) -> str: ...
    @property
    def values(self): ...
    def __contains__(self, key) -> bool: ...
    def __array__(
        self, dtype: DtypeArg = ..., copy: bool | None = ...
    ) -> np.ndarray: ...
    @property
    def is_unique(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    def unique(self, level=...): ...
    def reindex(self, target, method=..., level=..., limit=..., tolerance=...): ...
    @final
    def get_indexer(self, target, method=..., limit=..., tolerance=...): ...
    def get_indexer_non_unique(self, target): ...
    def delete(self, loc): ...
    def insert(self, loc, item): ...

View File

@ -0,0 +1,40 @@
import numpy as np
from pandas.core.indexes.extension import ExtensionIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from typing_extensions import Self
from pandas._libs.tslibs import BaseOffset
from pandas._typing import (
S1,
AxisIndex,
GenericT_co,
TimeUnit,
)
# Shared operations for datetime-like indexes (DatetimeIndex, TimedeltaIndex,
# PeriodIndex): frequency info and scalar-returning reductions.
class DatetimeIndexOpsMixin(ExtensionIndex[S1, GenericT_co]):
    @property
    def freq(self) -> BaseOffset | None: ...
    @property
    def freqstr(self) -> str | None: ...
    @property
    def is_all_dates(self) -> bool: ...
    # min/max return the element scalar type S1; argmin/argmax return the
    # position as np.int64.
    def min(
        self, axis: AxisIndex | None = None, skipna: bool = True, *args, **kwargs
    ) -> S1: ...
    def argmin(
        self, axis: AxisIndex | None = None, skipna: bool = True, *args, **kwargs
    ) -> np.int64: ...
    def max(
        self, axis: AxisIndex | None = None, skipna: bool = True, *args, **kwargs
    ) -> S1: ...
    def argmax(
        self, axis: AxisIndex | None = None, skipna: bool = True, *args, **kwargs
    ) -> np.int64: ...
    # Subtracting two datetime-like indexes yields the elementwise differences.
    def __rsub__( # type: ignore[override]
        self, other: DatetimeIndexOpsMixin
    ) -> TimedeltaIndex: ...
# Adds time-unit resolution handling (e.g. "ns", "us") on top of the shared
# datetime-like index operations.
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin[S1, GenericT_co]):
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit) -> Self: ...

View File

@ -0,0 +1,177 @@
from collections.abc import (
Hashable,
Sequence,
)
from datetime import (
datetime,
timedelta,
tzinfo as _tzinfo,
)
from typing import (
final,
overload,
)
import numpy as np
from pandas import (
DataFrame,
Index,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.indexes.accessors import DatetimeIndexProperties
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.series import (
TimedeltaSeries,
TimestampSeries,
)
from typing_extensions import Self
from pandas._libs.tslibs.offsets import DateOffset
from pandas._typing import (
AxesData,
DateAndDatetimeLike,
Dtype,
Frequency,
IntervalClosedType,
TimeUnit,
TimeZones,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.tseries.offsets import BaseOffset
# Index of Timestamp values (backed by np.datetime64), combining datetime
# arithmetic restrictions with the field-access properties mixin.
class DatetimeIndex(
    DatetimeTimedeltaMixin[Timestamp, np.datetime64], DatetimeIndexProperties
):
    def __new__(
        cls,
        data: AxesData,
        freq: Frequency = ...,
        tz: TimeZones = ...,
        ambiguous: str = ...,
        dayfirst: bool = ...,
        yearfirst: bool = ...,
        dtype: Dtype = ...,
        copy: bool = ...,
        name: Hashable = ...,
    ) -> Self: ...
    def __reduce__(self): ...
    # various ignores needed for mypy, as we do want to restrict what can be used in
    # arithmetic for these types
    @overload
    def __add__(self, other: TimedeltaSeries) -> TimestampSeries: ...
    @overload
    def __add__(
        self, other: timedelta | Timedelta | TimedeltaIndex | BaseOffset
    ) -> DatetimeIndex: ...
    @overload
    def __sub__(self, other: TimedeltaSeries) -> TimestampSeries: ...
    @overload
    def __sub__(
        self, other: timedelta | Timedelta | TimedeltaIndex | BaseOffset
    ) -> DatetimeIndex: ...
    # Subtracting datetimes (scalar or index) yields elementwise differences.
    @overload
    def __sub__(
        self, other: datetime | Timestamp | DatetimeIndex
    ) -> TimedeltaIndex: ...
    @final
    def to_series(self, index=..., name: Hashable = ...) -> TimestampSeries: ...
    def snap(self, freq: str = ...): ...
    def slice_indexer(self, start=..., end=..., step=...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    @property
    def inferred_type(self) -> str: ...
    def indexer_at_time(self, time, asof: bool = ...): ...
    def indexer_between_time(
        self,
        start_time: datetime | str,
        end_time: datetime | str,
        include_start: bool = True,
        include_end: bool = True,
    ): ...
    def to_julian_date(self) -> Index[float]: ...
    def isocalendar(self) -> DataFrame: ...
    @property
    def tzinfo(self) -> _tzinfo | None: ...
    # dtype is datetime64[...] for naive data, DatetimeTZDtype when tz-aware.
    @property
    def dtype(self) -> np.dtype | DatetimeTZDtype: ...
    def shift(
        self, periods: int = 1, freq: DateOffset | Timedelta | str | None = None
    ) -> Self: ...
@overload
def date_range(
start: str | DateAndDatetimeLike,
end: str | DateAndDatetimeLike,
freq: str | timedelta | Timedelta | BaseOffset | None = None,
tz: TimeZones = None,
normalize: bool = False,
name: Hashable | None = None,
inclusive: IntervalClosedType = "both",
unit: TimeUnit | None = None,
) -> DatetimeIndex: ...
@overload
def date_range(
start: str | DateAndDatetimeLike,
end: str | DateAndDatetimeLike,
periods: int,
tz: TimeZones = None,
normalize: bool = False,
name: Hashable | None = None,
inclusive: IntervalClosedType = "both",
unit: TimeUnit | None = None,
) -> DatetimeIndex: ...
@overload
def date_range(
start: str | DateAndDatetimeLike,
*,
periods: int,
freq: str | timedelta | Timedelta | BaseOffset | None = None,
tz: TimeZones = None,
normalize: bool = False,
name: Hashable | None = None,
inclusive: IntervalClosedType = "both",
unit: TimeUnit | None = None,
) -> DatetimeIndex: ...
@overload
def date_range(
*,
end: str | DateAndDatetimeLike,
periods: int,
freq: str | timedelta | Timedelta | BaseOffset | None = None,
tz: TimeZones = None,
normalize: bool = False,
name: Hashable | None = None,
inclusive: IntervalClosedType = "both",
unit: TimeUnit | None = None,
) -> DatetimeIndex: ...
@overload
def bdate_range(
start: str | DateAndDatetimeLike | None = ...,
end: str | DateAndDatetimeLike | None = ...,
periods: int | None = ...,
freq: str | timedelta | Timedelta | BaseOffset = ...,
tz: TimeZones = ...,
normalize: bool = ...,
name: Hashable | None = ...,
weekmask: str | None = ...,
holidays: None = ...,
inclusive: IntervalClosedType = ...,
) -> DatetimeIndex: ...
@overload
def bdate_range(
start: str | DateAndDatetimeLike | None = ...,
end: str | DateAndDatetimeLike | None = ...,
periods: int | None = ...,
*,
freq: str | timedelta | Timedelta | BaseOffset,
tz: TimeZones = ...,
normalize: bool = ...,
name: Hashable | None = ...,
weekmask: str | None = ...,
holidays: Sequence[str | DateAndDatetimeLike],
inclusive: IntervalClosedType = ...,
) -> DatetimeIndex: ...

View File

@ -0,0 +1,8 @@
from pandas.core.indexes.base import _IndexSubclassBase
from pandas._typing import (
S1,
GenericT_co,
)
class ExtensionIndex(_IndexSubclassBase[S1, GenericT_co]): ...

View File

@ -0,0 +1,9 @@
# Immutable-by-convention list (used for MultiIndex levels/names): supports
# set-like union/difference and — unlike a plain list — defines __hash__.
class FrozenList(list):
    def union(self, other) -> FrozenList: ...
    def difference(self, other) -> FrozenList: ...
    def __getitem__(self, n): ...
    def __radd__(self, other): ...
    def __eq__(self, other) -> bool: ...
    def __mul__(self, other): ...
    def __reduce__(self): ...
    def __hash__(self) -> int: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride]

View File

@ -0,0 +1,367 @@
from collections.abc import (
Hashable,
Sequence,
)
import datetime as dt
from typing import (
Literal,
final,
overload,
)
import numpy as np
import pandas as pd
from pandas import Index
from pandas.core.indexes.extension import ExtensionIndex
from pandas.core.series import (
TimedeltaSeries,
TimestampSeries,
)
from typing_extensions import TypeAlias
from pandas._libs.interval import (
Interval as Interval,
IntervalMixin,
)
from pandas._libs.tslibs.offsets import BaseOffset
from pandas._typing import (
DatetimeLike,
DtypeArg,
FillnaOptions,
IntervalClosedType,
IntervalT,
Label,
MaskType,
np_1darray,
np_ndarray_anyint,
np_ndarray_bool,
npt,
)
from pandas.core.dtypes.dtypes import IntervalDtype as IntervalDtype
# Edge-sequence aliases accepted by IntervalIndex.from_breaks/from_arrays.
# One alias per supported element type so the classmethod overloads can
# dispatch the resulting Interval element type from the edge type.
_EdgesInt: TypeAlias = (
    Sequence[int]
    | npt.NDArray[np.int64]
    | npt.NDArray[np.int32]
    | npt.NDArray[np.intp]
    | pd.Series[int]
    | Index[int]
)
_EdgesFloat: TypeAlias = (
    Sequence[float] | npt.NDArray[np.float64] | pd.Series[float] | Index[float]
)
_EdgesTimestamp: TypeAlias = (
    Sequence[DatetimeLike]
    | npt.NDArray[np.datetime64]
    | TimestampSeries
    | pd.DatetimeIndex
)
_EdgesTimedelta: TypeAlias = (
    Sequence[pd.Timedelta]
    | npt.NDArray[np.timedelta64]
    | TimedeltaSeries
    | pd.TimedeltaIndex
)
# Scalar endpoint aliases used by the interval_range overloads below.
_TimestampLike: TypeAlias = pd.Timestamp | np.datetime64 | dt.datetime
_TimedeltaLike: TypeAlias = pd.Timedelta | np.timedelta64 | dt.timedelta
class IntervalIndex(ExtensionIndex[IntervalT, np.object_], IntervalMixin):
    """Stub for pandas.IntervalIndex, generic over its Interval element type.

    The classmethod constructor overloads are ordered int -> float ->
    Timestamp -> Timedelta; the int overload is listed first (with an
    overlapping-overload suppression) so integer edges are not captured by
    the wider float overload.
    """

    closed: IntervalClosedType
    def __new__(
        cls,
        data: Sequence[IntervalT],
        closed: IntervalClosedType = ...,
        dtype: IntervalDtype | None = ...,
        copy: bool = ...,
        name: Hashable = ...,
        verify_integrity: bool = ...,
    ) -> IntervalIndex[IntervalT]: ...
    # from_breaks: contiguous intervals from a single sequence of edges.
    @overload
    @classmethod
    def from_breaks(  # pyright: ignore[reportOverlappingOverload]
        cls,
        breaks: _EdgesInt,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[int]]: ...
    @overload
    @classmethod
    def from_breaks(
        cls,
        breaks: _EdgesFloat,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[float]]: ...
    @overload
    @classmethod
    def from_breaks(
        cls,
        breaks: _EdgesTimestamp,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[pd.Timestamp]]: ...
    @overload
    @classmethod
    def from_breaks(
        cls,
        breaks: _EdgesTimedelta,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[pd.Timedelta]]: ...
    # from_arrays: intervals from parallel sequences of left/right edges.
    @overload
    @classmethod
    def from_arrays(  # pyright: ignore[reportOverlappingOverload]
        cls,
        left: _EdgesInt,
        right: _EdgesInt,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[int]]: ...
    @overload
    @classmethod
    def from_arrays(
        cls,
        left: _EdgesFloat,
        right: _EdgesFloat,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[float]]: ...
    @overload
    @classmethod
    def from_arrays(
        cls,
        left: _EdgesTimestamp,
        right: _EdgesTimestamp,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[pd.Timestamp]]: ...
    @overload
    @classmethod
    def from_arrays(
        cls,
        left: _EdgesTimedelta,
        right: _EdgesTimedelta,
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[Interval[pd.Timedelta]]: ...
    # from_tuples: intervals from a sequence of (left, right) pairs.
    @overload
    @classmethod
    def from_tuples(  # pyright: ignore[reportOverlappingOverload]
        cls,
        data: Sequence[tuple[int, int]],
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[pd.Interval[int]]: ...
    # Ignore misc here due to intentional overlap between int and float
    @overload
    @classmethod
    def from_tuples(
        cls,
        data: Sequence[tuple[float, float]],
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[pd.Interval[float]]: ...
    @overload
    @classmethod
    def from_tuples(
        cls,
        data: Sequence[
            tuple[pd.Timestamp, pd.Timestamp]
            | tuple[dt.datetime, dt.datetime]
            | tuple[np.datetime64, np.datetime64]
        ],
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[pd.Interval[pd.Timestamp]]: ...
    @overload
    @classmethod
    def from_tuples(
        cls,
        data: Sequence[
            tuple[pd.Timedelta, pd.Timedelta]
            | tuple[dt.timedelta, dt.timedelta]
            | tuple[np.timedelta64, np.timedelta64]
        ],
        closed: IntervalClosedType = ...,
        name: Hashable = ...,
        copy: bool = ...,
        dtype: IntervalDtype | None = ...,
    ) -> IntervalIndex[pd.Interval[pd.Timedelta]]: ...
    def to_tuples(self, na_tuple: bool = True) -> pd.Index: ...
    # __contains__ narrows to Literal[False] for keys of the wrong type.
    @overload
    def __contains__(self, key: IntervalT) -> bool: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __contains__(self, key: object) -> Literal[False]: ...
    def astype(self, dtype: DtypeArg, copy: bool = True) -> IntervalIndex: ...
    @property
    def inferred_type(self) -> str: ...
    def memory_usage(self, deep: bool = False) -> int: ...
    @property
    def is_overlapping(self) -> bool: ...
    # get_loc may return a boolean mask when the key matches non-contiguously.
    def get_loc(self, key: Label) -> int | slice | np_1darray[np.bool]: ...
    @final
    def get_indexer(
        self,
        target: Index,
        method: FillnaOptions | Literal["nearest"] | None = ...,
        limit: int | None = ...,
        tolerance=...,
    ) -> npt.NDArray[np.intp]: ...
    def get_indexer_non_unique(
        self, target: Index
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
    # Elementwise endpoint/derived accessors, each returning a plain Index.
    @property
    def left(self) -> Index: ...
    @property
    def right(self) -> Index: ...
    @property
    def mid(self) -> Index: ...
    @property
    def length(self) -> Index: ...
    # Sequence-like indexing keeps the IntervalIndex type; scalar indexing
    # returns a single Interval.
    @overload  # type: ignore[override]
    def __getitem__(
        self,
        idx: (
            slice
            | np_ndarray_anyint
            | Sequence[int]
            | Index
            | MaskType
            | np_ndarray_bool
        ),
    ) -> IntervalIndex[IntervalT]: ...
    @overload
    def __getitem__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, idx: int
    ) -> IntervalT: ...
    # Comparisons: array/scalar operands yield a boolean ndarray, Series
    # operands yield a boolean Series.
    @overload  # type: ignore[override]
    def __gt__(
        self, other: IntervalT | IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __gt__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: pd.Series[IntervalT]
    ) -> pd.Series[bool]: ...
    @overload  # type: ignore[override]
    def __ge__(
        self, other: IntervalT | IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __ge__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: pd.Series[IntervalT]
    ) -> pd.Series[bool]: ...
    @overload  # type: ignore[override]
    def __le__(
        self, other: IntervalT | IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __le__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: pd.Series[IntervalT]
    ) -> pd.Series[bool]: ...
    @overload  # type: ignore[override]
    def __lt__(
        self, other: IntervalT | IntervalIndex[IntervalT]
    ) -> np_1darray[np.bool]: ...
    @overload
    def __lt__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: pd.Series[IntervalT]
    ) -> pd.Series[bool]: ...
    # __eq__/__ne__ additionally narrow to a Literal for arbitrary objects.
    @overload  # type: ignore[override]
    def __eq__(self, other: IntervalT | IntervalIndex[IntervalT]) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __eq__(self, other: pd.Series[IntervalT]) -> pd.Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __eq__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: object
    ) -> Literal[False]: ...
    @overload  # type: ignore[override]
    def __ne__(self, other: IntervalT | IntervalIndex[IntervalT]) -> np_1darray[np.bool]: ...  # type: ignore[overload-overlap] # pyright: ignore[reportOverlappingOverload]
    @overload
    def __ne__(self, other: pd.Series[IntervalT]) -> pd.Series[bool]: ...  # type: ignore[overload-overlap]
    @overload
    def __ne__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: object
    ) -> Literal[True]: ...
# misc here because int and float overlap but interval has distinct types
# int gets hit first and so the correct type is returned
@overload
# Overload: integer endpoints -> IntervalIndex of Interval[int].
def interval_range(  # pyright: ignore[reportOverlappingOverload]
    start: int | None = ...,
    end: int | None = ...,
    periods: int | None = ...,
    freq: int | None = ...,
    name: Hashable = ...,
    closed: IntervalClosedType = ...,
) -> IntervalIndex[Interval[int]]: ...
@overload
# Overload: float endpoints -> IntervalIndex of Interval[float] (ints were
# already captured by the previous overload).
def interval_range(
    start: float | None = ...,
    end: float | None = ...,
    periods: int | None = ...,
    freq: int | None = ...,
    name: Hashable = ...,
    closed: IntervalClosedType = ...,
) -> IntervalIndex[Interval[float]]: ...
@overload
# Overload: timestamp-like `start` (with optional `end`) -> Timestamp
# intervals; `freq` accepts offset strings/objects or timedeltas.
def interval_range(
    start: _TimestampLike,
    end: _TimestampLike | None = ...,
    periods: int | None = ...,
    freq: str | BaseOffset | pd.Timedelta | dt.timedelta | None = ...,
    name: Hashable = ...,
    closed: IntervalClosedType = ...,
) -> IntervalIndex[Interval[pd.Timestamp]]: ...
@overload
# Overload: timestamp-like `end` only (keyword-only, `start` omitted/None).
def interval_range(
    *,
    start: None = ...,
    end: _TimestampLike,
    periods: int | None = ...,
    freq: str | BaseOffset | pd.Timedelta | dt.timedelta | None = ...,
    name: Hashable = ...,
    closed: IntervalClosedType = ...,
) -> IntervalIndex[Interval[pd.Timestamp]]: ...
@overload
# Overload: timedelta-like `start` (with optional `end`) -> Timedelta
# intervals.
def interval_range(
    start: _TimedeltaLike,
    end: _TimedeltaLike | None = ...,
    periods: int | None = ...,
    freq: str | BaseOffset | pd.Timedelta | dt.timedelta | None = ...,
    name: Hashable = ...,
    closed: IntervalClosedType = ...,
) -> IntervalIndex[Interval[pd.Timedelta]]: ...
@overload
# Overload: timedelta-like `end` only (keyword-only, `start` omitted/None).
def interval_range(
    *,
    start: None = ...,
    end: _TimedeltaLike,
    periods: int | None = ...,
    freq: str | BaseOffset | pd.Timedelta | dt.timedelta | None = ...,
    name: Hashable = ...,
    closed: IntervalClosedType = ...,
) -> IntervalIndex[Interval[pd.Timedelta]]: ...

View File

@ -0,0 +1,164 @@
from collections.abc import (
Callable,
Hashable,
Iterable,
Sequence,
)
from typing import (
final,
overload,
)
import numpy as np
import pandas as pd
from pandas.core.indexes.base import Index
from typing_extensions import Self
from pandas._typing import (
AnyAll,
Axes,
DropKeep,
Dtype,
HashableT,
IndexLabel,
Level,
MaskType,
NaPosition,
SequenceNotStr,
np_1darray,
np_ndarray_anyint,
)
class MultiIndex(Index):
    """Stub for pandas.MultiIndex, a hierarchical (multi-level) Index.

    Most methods here intentionally carry loose annotations; the precisely
    typed members are the alternate constructors, `__getitem__`, and the
    level accessors.
    """

    def __new__(
        cls,
        levels: Sequence[SequenceNotStr[Hashable]] = ...,
        codes: Sequence[Sequence[int]] = ...,
        sortorder: int | None = ...,
        names: SequenceNotStr[Hashable] = ...,
        copy: bool = ...,
        name: SequenceNotStr[Hashable] = ...,
        verify_integrity: bool = ...,
    ) -> Self: ...
    # Alternate constructors.
    @classmethod
    def from_arrays(
        cls,
        arrays: Sequence[Axes],
        sortorder: int | None = ...,
        names: SequenceNotStr[Hashable] = ...,
    ) -> Self: ...
    @classmethod
    def from_tuples(
        cls,
        tuples: Iterable[tuple[Hashable, ...]],
        sortorder: int | None = ...,
        names: SequenceNotStr[Hashable] = ...,
    ) -> Self: ...
    @classmethod
    def from_product(
        cls,
        iterables: Sequence[SequenceNotStr[Hashable] | pd.Series | pd.Index | range],
        sortorder: int | None = ...,
        names: SequenceNotStr[Hashable] = ...,
    ) -> Self: ...
    @classmethod
    def from_frame(
        cls,
        df: pd.DataFrame,
        sortorder: int | None = ...,
        names: SequenceNotStr[Hashable] = ...,
    ) -> Self: ...
    @property
    def shape(self): ...
    @property  # Should be read-only
    def levels(self) -> list[Index]: ...
    def set_levels(self, levels, *, level=..., verify_integrity: bool = ...): ...
    @property
    def codes(self): ...
    def set_codes(self, codes, *, level=..., verify_integrity: bool = ...): ...
    def copy(  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore
        self, names: SequenceNotStr[Hashable] = ..., deep: bool = False
    ) -> Self: ...
    def view(self, cls=...): ...
    def __contains__(self, key) -> bool: ...
    @property
    def dtype(self) -> np.dtype: ...
    # Per-level dtypes, unlike the single `dtype` above.
    @property
    def dtypes(self) -> pd.Series[Dtype]: ...
    def memory_usage(self, deep: bool = False) -> int: ...
    @property
    def nbytes(self) -> int: ...
    def format(
        self,
        name: bool | None = ...,
        formatter: Callable | None = ...,
        na_rep: str | None = ...,
        names: bool = ...,
        space: int = ...,
        sparsify: bool | None = ...,
        adjoin: bool = ...,
    ) -> list: ...
    def __len__(self) -> int: ...
    @property
    def values(self): ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    def duplicated(self, keep: DropKeep = "first"): ...
    def dropna(self, how: AnyAll = "any") -> Self: ...
    def get_level_values(self, level: str | int) -> Index: ...
    def unique(self, level=...): ...
    def to_frame(  # pyrefly: ignore
        self,
        index: bool = True,
        name: list[HashableT] = ...,
        allow_duplicates: bool = False,
    ) -> pd.DataFrame: ...
    def to_flat_index(self): ...
    def remove_unused_levels(self): ...
    @property
    def nlevels(self) -> int: ...
    @property
    def levshape(self): ...
    def __reduce__(self): ...
    # Sequence-like indexing keeps the MultiIndex; scalar indexing returns
    # one label tuple.
    @overload  # type: ignore[override]
    def __getitem__(
        self,
        idx: slice | np_ndarray_anyint | Sequence[int] | Index | MaskType,
    ) -> Self: ...
    @overload
    def __getitem__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, key: int
    ) -> tuple: ...
    def append(self, other): ...  # pyrefly: ignore
    def repeat(self, repeats, axis=...): ...
    def drop(self, codes, level: Level | None = None, errors: str = "raise") -> Self: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def swaplevel(self, i: int = -2, j: int = -1): ...
    def reorder_levels(self, order): ...
    def sortlevel(
        self,
        level: Level | Sequence[Level] = 0,
        ascending: bool = True,
        sort_remaining: bool = True,
        na_position: NaPosition = "first",
    ): ...
    @final
    def get_indexer(self, target, method=..., limit=..., tolerance=...): ...
    def get_indexer_non_unique(self, target): ...
    def reindex(self, target, method=..., level=..., limit=..., tolerance=...): ...
    def get_slice_bound(
        self, label: Hashable | Sequence[Hashable], side: str
    ) -> int: ...
    def get_loc_level(
        self, key, level: Level | list[Level] | None = None, drop_level: bool = True
    ): ...
    def get_locs(self, seq): ...
    def truncate(
        self, before: IndexLabel | None = None, after: IndexLabel | None = None
    ): ...
    def equals(self, other) -> bool: ...
    def equal_levels(self, other): ...
    def insert(self, loc, item): ...
    def delete(self, loc): ...
    def isin(self, values, level=...) -> np_1darray[np.bool]: ...

Some files were not shown because too many files have changed in this diff Show More