@@ -0,0 +1,5 @@
from pandas.io import (
    formats as formats,
    json as json,
    stata as stata,
)
33
lib/python3.11/site-packages/pandas-stubs/io/api.pyi
Normal file
@@ -0,0 +1,33 @@
from pandas.io.clipboards import read_clipboard as read_clipboard
from pandas.io.excel import (
    ExcelFile as ExcelFile,
    ExcelWriter as ExcelWriter,
    read_excel as read_excel,
)
from pandas.io.feather_format import read_feather as read_feather
from pandas.io.gbq import read_gbq as read_gbq
from pandas.io.html import read_html as read_html
from pandas.io.json import read_json as read_json
from pandas.io.orc import read_orc as read_orc
from pandas.io.parquet import read_parquet as read_parquet
from pandas.io.parsers import (
    read_csv as read_csv,
    read_fwf as read_fwf,
    read_table as read_table,
)
from pandas.io.pickle import (
    read_pickle as read_pickle,
)
from pandas.io.pytables import (
    HDFStore as HDFStore,
    read_hdf as read_hdf,
)
from pandas.io.sas import read_sas as read_sas
from pandas.io.spss import read_spss as read_spss
from pandas.io.sql import (
    read_sql as read_sql,
    read_sql_query as read_sql_query,
    read_sql_table as read_sql_table,
)
from pandas.io.stata import read_stata as read_stata
from pandas.io.xml import read_xml as read_xml
205
lib/python3.11/site-packages/pandas-stubs/io/clipboards.pyi
Normal file
@@ -0,0 +1,205 @@
from collections import defaultdict
from collections.abc import (
    Callable,
    Sequence,
)
import csv
from typing import (
    Any,
    Literal,
    overload,
)

from pandas.core.frame import DataFrame

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    CompressionOptions,
    CSVEngine,
    CSVQuoting,
    DtypeArg,
    DtypeBackend,
    ListLikeHashable,
    StorageOptions,
    UsecolsArgType,
)

from pandas.io.parsers import TextFileReader

@overload
def read_clipboard(
    sep: str | None = ...,
    *,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: dict[int | str, Callable[[str], Any]] = ...,
    true_values: list[str] = ...,
    false_values: list[str] = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str, Sequence[str]] = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | list[str]
        | Sequence[Sequence[int]]
        | dict[str, Sequence[int]]
    ) = ...,
    infer_datetime_format: bool = ...,
    keep_date_col: bool = ...,
    date_parser: Callable = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
) -> TextFileReader: ...
@overload
def read_clipboard(
    sep: str | None = ...,
    *,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: dict[int | str, Callable[[str], Any]] = ...,
    true_values: list[str] = ...,
    false_values: list[str] = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str, Sequence[str]] = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | list[str]
        | Sequence[Sequence[int]]
        | dict[str, Sequence[int]]
    ) = ...,
    infer_datetime_format: bool = ...,
    keep_date_col: bool = ...,
    date_parser: Callable = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
) -> TextFileReader: ...
@overload
def read_clipboard(
    sep: str | None = ...,
    *,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: dict[int | str, Callable[[str], Any]] = ...,
    true_values: list[str] = ...,
    false_values: list[str] = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str, Sequence[str]] = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | list[str]
        | Sequence[Sequence[int]]
        | dict[str, Sequence[int]]
    ) = ...,
    infer_datetime_format: bool = ...,
    keep_date_col: bool = ...,
    date_parser: Callable = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
) -> DataFrame: ...
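Note (illustrative, not part of the diff): a minimal sketch of how these read_clipboard overloads resolve for a caller, assuming tabular text is already on the clipboard:

import pandas as pd

# iterator=True (or an int chunksize) matches the TextFileReader overloads
reader = pd.read_clipboard(sep="\t", iterator=True, chunksize=100)
for chunk in reader:  # each chunk is a DataFrame
    print(chunk.shape)

# with the defaults (iterator=False, chunksize=None) the DataFrame overload wins
df = pd.read_clipboard(sep="\t")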
@@ -0,0 +1,5 @@
from pandas.io.excel._base import (
    ExcelFile as ExcelFile,
    ExcelWriter as ExcelWriter,
    read_excel as read_excel,
)
327
lib/python3.11/site-packages/pandas-stubs/io/excel/_base.pyi
Normal file
@@ -0,0 +1,327 @@
from collections.abc import (
    Callable,
    Hashable,
    Iterable,
    Mapping,
    Sequence,
)
from types import TracebackType
from typing import (
    Any,
    Literal,
    overload,
)

from odf.opendocument import OpenDocument
from openpyxl.workbook.workbook import Workbook
from pandas.core.frame import DataFrame
import pyxlsb.workbook
from typing_extensions import Self
from xlrd.book import Book

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    Dtype,
    DtypeBackend,
    ExcelReadEngine,
    ExcelWriteEngine,
    ExcelWriterIfSheetExists,
    FilePath,
    IntStrT,
    ListLikeHashable,
    ReadBuffer,
    StorageOptions,
    UsecolsArgType,
    WriteExcelBuffer,
)

@overload
def read_excel(
    io: (
        FilePath
        | ReadBuffer[bytes]
        | ExcelFile
        | Workbook
        | Book
        | OpenDocument
        | pyxlsb.workbook.Workbook
    ),
    sheet_name: list[IntStrT],
    *,
    header: int | Sequence[int] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | Sequence[int] | str | None = ...,
    usecols: str | UsecolsArgType = ...,
    dtype: str | Dtype | Mapping[str, str | Dtype] | None = ...,
    engine: ExcelReadEngine | None = ...,
    converters: Mapping[int | str, Callable[[object], object]] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: int | Sequence[int] | Callable[[object], bool] | None = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str | int, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | Sequence[Sequence[str] | Sequence[int]]
        | dict[str, Sequence[int] | list[str]]
    ) = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine_kwargs: dict[str, Any] | None = ...,
) -> dict[IntStrT, DataFrame]: ...
@overload
def read_excel(
    io: (
        FilePath
        | ReadBuffer[bytes]
        | ExcelFile
        | Workbook
        | Book
        | OpenDocument
        | pyxlsb.workbook.Workbook
    ),
    sheet_name: None,
    *,
    header: int | Sequence[int] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | Sequence[int] | str | None = ...,
    usecols: str | UsecolsArgType = ...,
    dtype: str | Dtype | Mapping[str, str | Dtype] | None = ...,
    engine: ExcelReadEngine | None = ...,
    converters: Mapping[int | str, Callable[[object], object]] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: int | Sequence[int] | Callable[[object], bool] | None = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str | int, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | Sequence[Sequence[str] | Sequence[int]]
        | dict[str, Sequence[int] | list[str]]
    ) = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine_kwargs: dict[str, Any] | None = ...,
) -> dict[str, DataFrame]: ...
@overload
# mypy says this won't be matched
def read_excel(  # type: ignore[overload-cannot-match]
    io: (
        FilePath
        | ReadBuffer[bytes]
        | ExcelFile
        | Workbook
        | Book
        | OpenDocument
        | pyxlsb.workbook.Workbook
    ),
    sheet_name: list[int | str],
    *,
    header: int | Sequence[int] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | Sequence[int] | str | None = ...,
    usecols: str | UsecolsArgType = ...,
    dtype: str | Dtype | Mapping[str, str | Dtype] | None = ...,
    engine: ExcelReadEngine | None = ...,
    converters: Mapping[int | str, Callable[[object], object]] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: int | Sequence[int] | Callable[[object], bool] | None = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str | int, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | Sequence[Sequence[str] | Sequence[int]]
        | dict[str, Sequence[int] | list[str]]
    ) = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine_kwargs: dict[str, Any] | None = ...,
) -> dict[int | str, DataFrame]: ...
@overload
def read_excel(
    io: (
        FilePath
        | ReadBuffer[bytes]
        | ExcelFile
        | Workbook
        | Book
        | OpenDocument
        | pyxlsb.workbook.Workbook
    ),
    sheet_name: int | str = ...,
    *,
    header: int | Sequence[int] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | Sequence[int] | str | None = ...,
    usecols: str | UsecolsArgType = ...,
    dtype: str | Dtype | Mapping[str, str | Dtype] | None = ...,
    engine: ExcelReadEngine | None = ...,
    converters: Mapping[int | str, Callable[[object], object]] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: int | Sequence[int] | Callable[[object], bool] | None = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | dict[str | int, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | Sequence[Sequence[str] | Sequence[int]]
        | dict[str, Sequence[int] | list[str]]
    ) = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine_kwargs: dict[str, Any] | None = ...,
) -> DataFrame: ...

class ExcelWriter:
    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: ExcelWriteEngine | Literal["auto"] | None = ...,
        date_format: str | None = ...,
        datetime_format: str | None = ...,
        mode: Literal["w", "a"] = ...,
        storage_options: StorageOptions = ...,
        if_sheet_exists: ExcelWriterIfSheetExists | None = ...,
        engine_kwargs: dict[str, Any] | None = ...,
    ) -> None: ...
    @property
    def supported_extensions(self) -> tuple[str, ...]: ...
    @property
    def engine(self) -> ExcelWriteEngine: ...
    @property
    def sheets(self) -> dict[str, Any]: ...
    @property
    def book(self) -> Workbook | OpenDocument: ...
    @property
    def date_format(self) -> str: ...
    @property
    def datetime_format(self) -> str: ...
    @property
    def if_sheet_exists(self) -> Literal["error", "new", "replace", "overlay"]: ...
    def __fspath__(self) -> str: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None: ...
    def close(self) -> None: ...

class ExcelFile:
    engine = ...
    io: FilePath | ReadBuffer[bytes] | bytes = ...
    def __init__(
        self,
        path_or_buffer: FilePath | ReadBuffer[bytes] | bytes,
        engine: ExcelReadEngine | None = ...,
        storage_options: StorageOptions = ...,
        engine_kwargs: dict[str, Any] | None = ...,
    ) -> None: ...
    def __fspath__(self): ...
    @overload
    def parse(
        self,
        sheet_name: list[int | str] | None,
        header: int | Sequence[int] | None = ...,
        names: ListLikeHashable | None = ...,
        index_col: int | Sequence[int] | None = ...,
        usecols: str | UsecolsArgType = ...,
        converters: dict[int | str, Callable[[object], object]] | None = ...,
        true_values: Iterable[Hashable] | None = ...,
        false_values: Iterable[Hashable] | None = ...,
        skiprows: int | Sequence[int] | Callable[[object], bool] | None = ...,
        nrows: int | None = ...,
        na_values: Sequence[str] | dict[str | int, Sequence[str]] = ...,
        parse_dates: (
            bool
            | Sequence[int]
            | Sequence[Sequence[str] | Sequence[int]]
            | dict[str, Sequence[int] | list[str]]
        ) = ...,
        date_parser: Callable | None = ...,
        thousands: str | None = ...,
        comment: str | None = ...,
        skipfooter: int = ...,
        keep_default_na: bool = ...,
        na_filter: bool = ...,
        **kwds: Any,
    ) -> dict[int | str, DataFrame]: ...
    @overload
    def parse(
        self,
        sheet_name: int | str,
        header: int | Sequence[int] | None = ...,
        names: ListLikeHashable | None = ...,
        index_col: int | Sequence[int] | None = ...,
        usecols: str | UsecolsArgType = ...,
        converters: dict[int | str, Callable[[object], object]] | None = ...,
        true_values: Iterable[Hashable] | None = ...,
        false_values: Iterable[Hashable] | None = ...,
        skiprows: int | Sequence[int] | Callable[[object], bool] | None = ...,
        nrows: int | None = ...,
        na_values: Sequence[str] | dict[str | int, Sequence[str]] = ...,
        parse_dates: (
            bool
            | Sequence[int]
            | Sequence[Sequence[str] | Sequence[int]]
            | dict[str, Sequence[int] | list[str]]
        ) = ...,
        date_parser: Callable | None = ...,
        thousands: str | None = ...,
        comment: str | None = ...,
        skipfooter: int = ...,
        keep_default_na: bool = ...,
        na_filter: bool = ...,
        **kwds: Any,
    ) -> DataFrame: ...
    @property
    def book(self) -> Workbook | Book | OpenDocument | pyxlsb.workbook.Workbook: ...
    @property
    def sheet_names(self) -> list[int | str]: ...
    def close(self) -> None: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None: ...
    def __del__(self) -> None: ...
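Note (illustrative, not part of the diff): a minimal sketch of how sheet_name drives the read_excel overloads, and of the two context managers; the file names are hypothetical:

import pandas as pd

df = pd.read_excel("report.xlsx", sheet_name=0)         # -> DataFrame
sheets = pd.read_excel("report.xlsx", sheet_name=None)  # -> dict[str, DataFrame]

with pd.ExcelFile("report.xlsx") as xls:
    first = xls.parse(xls.sheet_names[0])               # -> DataFrame

with pd.ExcelWriter("out.xlsx", mode="w") as writer:
    df.to_excel(writer, sheet_name="Sheet1")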
@@ -0,0 +1,18 @@
from pandas import DataFrame

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    DtypeBackend,
    FilePath,
    HashableT,
    ReadBuffer,
    StorageOptions,
)

def read_feather(
    path: FilePath | ReadBuffer[bytes],
    columns: list[HashableT] | None = None,
    use_threads: bool = True,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = "numpy_nullable",
) -> DataFrame: ...
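Note (illustrative, not part of the diff): a minimal read_feather call; "data.feather" is a hypothetical path and the pyarrow dependency is assumed:

import pandas as pd

# columns restricts the read to a subset of columns
df = pd.read_feather("data.feather", columns=["a", "b"], use_threads=True)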
@@ -0,0 +1 @@
from pandas.io.formats import style as style
@@ -0,0 +1,10 @@
class EngFormatter:
    ENG_PREFIXES = ...
    accuracy = ...
    use_eng_prefix = ...
    def __init__(
        self, accuracy: int | None = ..., use_eng_prefix: bool = ...
    ) -> None: ...
    def __call__(self, num: float) -> str: ...

def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None: ...
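Note (illustrative, not part of the diff): a minimal sketch of these two names, assuming this hunk is pandas-stubs/io/formats/format.pyi (the file header was lost above):

import pandas as pd
from pandas.io.formats.format import EngFormatter

fmt = EngFormatter(accuracy=2, use_eng_prefix=True)
fmt(1_500_000)  # a string of roughly the form '1.50M'

# applies engineering notation to float display globally
pd.set_eng_float_format(accuracy=3, use_eng_prefix=True)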
395
lib/python3.11/site-packages/pandas-stubs/io/formats/style.pyi
Normal file
@@ -0,0 +1,395 @@
from collections.abc import (
    Callable,
    Sequence,
)
from typing import (
    Any,
    Literal,
    Protocol,
    overload,
)

from matplotlib.colors import Colormap
import numpy as np
from pandas.core.frame import DataFrame
from pandas.core.series import Series

from pandas._typing import (
    Axis,
    ExcelWriterMergeCells,
    FilePath,
    HashableT,
    HashableT1,
    HashableT2,
    IndexLabel,
    IntervalClosedType,
    Level,
    QuantileInterpolation,
    Scalar,
    StorageOptions,
    T,
    WriteBuffer,
    WriteExcelBuffer,
    npt,
)

from pandas.io.excel import ExcelWriter
from pandas.io.formats.style_render import (
    CSSProperties,
    CSSStyles,
    ExtFormatter,
    StyleExportDict,
    StylerRenderer,
    Subset,
)

class _SeriesFunc(Protocol):
    def __call__(
        self, series: Series, /, *args: Any, **kwargs: Any
    ) -> list | Series: ...

class _DataFrameFunc(Protocol):
    def __call__(
        self, series: DataFrame, /, *args: Any, **kwargs: Any
    ) -> npt.NDArray | DataFrame: ...

class _MapCallable(Protocol):
    def __call__(
        self, first_arg: Scalar, /, *args: Any, **kwargs: Any
    ) -> str | None: ...

class Styler(StylerRenderer):
    def __init__(
        self,
        data: DataFrame | Series,
        precision: int | None = ...,
        table_styles: CSSStyles | None = ...,
        uuid: str | None = ...,
        caption: str | tuple[str, str] | None = ...,
        table_attributes: str | None = ...,
        cell_ids: bool = ...,
        na_rep: str | None = ...,
        uuid_len: int = ...,
        decimal: str | None = ...,
        thousands: str | None = ...,
        escape: str | None = ...,
        formatter: ExtFormatter | None = ...,
    ) -> None: ...
    def concat(self, other: Styler) -> Styler: ...
    @overload
    def map(
        self,
        func: Callable[[Scalar], str | None],
        subset: Subset | None = ...,
    ) -> Styler: ...
    @overload
    def map(
        self,
        func: _MapCallable,
        subset: Subset | None = ...,
        **kwargs: Any,
    ) -> Styler: ...
    def set_tooltips(
        self,
        ttips: DataFrame,
        props: CSSProperties | None = ...,
        css_class: str | None = ...,
        as_title_attribute: bool = ...,
    ) -> Styler: ...
    def to_excel(
        self,
        excel_writer: FilePath | WriteExcelBuffer | ExcelWriter,
        sheet_name: str = "Sheet1",
        na_rep: str = "",
        float_format: str | None = None,
        columns: list[HashableT1] | None = None,
        header: list[HashableT2] | bool = True,
        index: bool = True,
        index_label: IndexLabel | None = None,
        startrow: int = 0,
        startcol: int = 0,
        engine: Literal["openpyxl", "xlsxwriter"] | None = None,
        merge_cells: ExcelWriterMergeCells = True,
        encoding: str | None = None,
        inf_rep: str = "inf",
        verbose: bool = True,
        freeze_panes: tuple[int, int] | None = None,
        storage_options: StorageOptions | None = None,
    ) -> None: ...
    @overload
    def to_latex(
        self,
        buf: FilePath | WriteBuffer[str],
        *,
        column_format: str | None = ...,
        position: str | None = ...,
        position_float: Literal["centering", "raggedleft", "raggedright"] | None = ...,
        hrules: bool | None = ...,
        clines: (
            Literal["all;data", "all;index", "skip-last;data", "skip-last;index"] | None
        ) = ...,
        label: str | None = ...,
        caption: str | tuple[str, str] | None = ...,
        sparse_index: bool | None = ...,
        sparse_columns: bool | None = ...,
        multirow_align: Literal["c", "t", "b", "naive"] | None = ...,
        multicol_align: Literal["r", "c", "l", "naive-l", "naive-r"] | None = ...,
        siunitx: bool = ...,
        environment: str | None = ...,
        encoding: str | None = ...,
        convert_css: bool = ...,
    ) -> None: ...
    @overload
    def to_latex(
        self,
        buf: None = ...,
        *,
        column_format: str | None = ...,
        position: str | None = ...,
        position_float: Literal["centering", "raggedleft", "raggedright"] | None = ...,
        hrules: bool | None = ...,
        clines: (
            Literal["all;data", "all;index", "skip-last;data", "skip-last;index"] | None
        ) = ...,
        label: str | None = ...,
        caption: str | tuple[str, str] | None = ...,
        sparse_index: bool | None = ...,
        sparse_columns: bool | None = ...,
        multirow_align: Literal["c", "t", "b", "naive"] | None = ...,
        multicol_align: Literal["r", "c", "l", "naive-l", "naive-r"] | None = ...,
        siunitx: bool = ...,
        environment: str | None = ...,
        encoding: str | None = ...,
        convert_css: bool = ...,
    ) -> str: ...
    @overload
    def to_html(
        self,
        buf: FilePath | WriteBuffer[str],
        *,
        table_uuid: str | None = ...,
        table_attributes: str | None = ...,
        sparse_index: bool | None = ...,
        sparse_columns: bool | None = ...,
        bold_headers: bool = ...,
        caption: str | None = ...,
        max_rows: int | None = ...,
        max_columns: int | None = ...,
        encoding: str | None = ...,
        doctype_html: bool = ...,
        exclude_styles: bool = ...,
        **kwargs: Any,
    ) -> None: ...
    @overload
    def to_html(
        self,
        buf: None = ...,
        *,
        table_uuid: str | None = ...,
        table_attributes: str | None = ...,
        sparse_index: bool | None = ...,
        sparse_columns: bool | None = ...,
        bold_headers: bool = ...,
        caption: str | None = ...,
        max_rows: int | None = ...,
        max_columns: int | None = ...,
        encoding: str | None = ...,
        doctype_html: bool = ...,
        exclude_styles: bool = ...,
        **kwargs: Any,
    ) -> str: ...
    @overload
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str],
        *,
        encoding: str | None = ...,
        sparse_index: bool | None = ...,
        sparse_columns: bool | None = ...,
        max_rows: int | None = ...,
        max_columns: int | None = ...,
        delimiter: str = ...,
    ) -> None: ...
    @overload
    def to_string(
        self,
        buf: None = ...,
        *,
        encoding: str | None = ...,
        sparse_index: bool | None = ...,
        sparse_columns: bool | None = ...,
        max_rows: int | None = ...,
        max_columns: int | None = ...,
        delimiter: str = ...,
    ) -> str: ...
    def set_td_classes(self, classes: DataFrame) -> Styler: ...
    def __copy__(self) -> Styler: ...
    def __deepcopy__(self, memo) -> Styler: ...
    def clear(self) -> None: ...
    @overload
    def apply(
        self,
        func: _SeriesFunc | Callable[[Series], list | Series],
        axis: Axis = ...,
        subset: Subset | None = ...,
        **kwargs: Any,
    ) -> Styler: ...
    @overload
    def apply(
        self,
        func: _DataFrameFunc | Callable[[DataFrame], npt.NDArray | DataFrame],
        axis: None,
        subset: Subset | None = ...,
        **kwargs: Any,
    ) -> Styler: ...
    def apply_index(
        self,
        func: Callable[[Series], npt.NDArray[np.str_] | list[str] | Series[str]],
        axis: Axis = ...,
        level: Level | list[Level] | None = ...,
        **kwargs: Any,
    ) -> Styler: ...
    def map_index(
        self,
        func: Callable[[Scalar], str | None],
        axis: Axis = ...,
        level: Level | list[Level] | None = ...,
        **kwargs,
    ) -> Styler: ...
    def set_table_attributes(self, attributes: str) -> Styler: ...
    def export(self) -> StyleExportDict: ...
    def use(self, styles: StyleExportDict) -> Styler: ...
    def set_uuid(self, uuid: str) -> Styler: ...
    def set_caption(self, caption: str | tuple[str, str]) -> Styler: ...
    def set_sticky(
        self,
        axis: Axis = 0,
        pixel_size: int | None = None,
        levels: Level | list[Level] | None = None,
    ) -> Styler: ...
    def set_table_styles(
        self,
        table_styles: dict[HashableT, CSSStyles] | CSSStyles | None = None,
        axis: Axis = 0,
        overwrite: bool = True,
        css_class_names: dict[str, str] | None = None,
    ) -> Styler: ...
    def hide(
        self,
        subset: Subset | None = ...,
        axis: Axis = ...,
        level: Level | list[Level] | None = ...,
        names: bool = ...,
    ) -> Styler: ...
    def background_gradient(
        self,
        cmap: str | Colormap = "PuBu",
        low: float = 0,
        high: float = 0,
        axis: Axis | None = 0,
        subset: Subset | None = None,
        text_color_threshold: float = 0.408,
        vmin: float | None = None,
        vmax: float | None = None,
        gmap: (
            Sequence[float]
            | Sequence[Sequence[float]]
            | npt.NDArray
            | DataFrame
            | Series
            | None
        ) = None,
    ) -> Styler: ...
    def text_gradient(
        self,
        cmap: str | Colormap = "PuBu",
        low: float = 0,
        high: float = 0,
        axis: Axis | None = 0,
        subset: Subset | None = None,
        vmin: float | None = None,
        vmax: float | None = None,
        gmap: (
            Sequence[float]
            | Sequence[Sequence[float]]
            | npt.NDArray
            | DataFrame
            | Series
            | None
        ) = None,
    ) -> Styler: ...
    def set_properties(
        self, subset: Subset | None = ..., **kwargs: str | int
    ) -> Styler: ...
    def bar(
        self,
        subset: Subset | None = None,
        axis: Axis | None = 0,
        *,
        color: str | list[str] | tuple[str, str] | None = None,
        cmap: str | Colormap | None = None,
        width: float = 100,
        height: float = 100,
        align: (
            Literal["left", "right", "zero", "mid", "mean"]
            | float
            | Callable[[Series | npt.NDArray | DataFrame], float]
        ) = "mid",
        vmin: float | None = None,
        vmax: float | None = None,
        props: str = "width: 10em;",
    ) -> Styler: ...
    def highlight_null(
        self,
        color: str | None = "red",
        subset: Subset | None = None,
        props: str | None = None,
    ) -> Styler: ...
    def highlight_max(
        self,
        subset: Subset | None = None,
        color: str = "yellow",
        axis: Axis | None = 0,
        props: str | None = None,
    ) -> Styler: ...
    def highlight_min(
        self,
        subset: Subset | None = None,
        color: str = "yellow",
        axis: Axis | None = 0,
        props: str | None = None,
    ) -> Styler: ...
    def highlight_between(
        self,
        subset: Subset | None = None,
        color: str = "yellow",
        axis: Axis | None = 0,
        left: Scalar | list[Scalar] | None = None,
        right: Scalar | list[Scalar] | None = None,
        inclusive: IntervalClosedType = "both",
        props: str | None = None,
    ) -> Styler: ...
    def highlight_quantile(
        self,
        subset: Subset | None = None,
        color: str = "yellow",
        axis: Axis | None = 0,
        q_left: float = 0,
        q_right: float = 1,
        interpolation: QuantileInterpolation = "linear",
        inclusive: IntervalClosedType = "both",
        props: str | None = None,
    ) -> Styler: ...
    @classmethod
    def from_custom_template(
        cls,
        searchpath: str | list[str],
        html_table: str | None = ...,
        html_style: str | None = ...,
    ) -> type[Styler]: ...
    def pipe(
        self,
        func: Callable[..., T] | tuple[Callable[..., T], str],
        *args: Any,
        **kwargs: Any,
    ) -> T: ...
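Note (illustrative, not part of the diff): a minimal Styler chain showing the fluent Styler-returning methods and the buf=None overload of to_html; the output path is hypothetical:

import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0], "y": [3.0, None]})
styled = (
    df.style.format(precision=2, na_rep="-")
    .highlight_max(axis=0)
    .set_caption("demo")
)
html = styled.to_html()         # buf=None overload -> str
styled.to_excel("styled.xlsx")  # needs openpyxl; hypothetical path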
@@ -0,0 +1,91 @@
from collections.abc import (
    Callable,
    Sequence,
)
from typing import (
    Any,
    Literal,
    TypedDict,
)

from jinja2.environment import (
    Environment,
    Template,
)
from jinja2.loaders import PackageLoader
from pandas import Index
from pandas.core.indexing import _IndexSlice
from typing_extensions import (
    Self,
    TypeAlias,
)

from pandas._typing import (
    Axis,
    HashableT,
    Level,
)

BaseFormatter: TypeAlias = str | Callable[[object], str]
ExtFormatter: TypeAlias = BaseFormatter | dict[Any, BaseFormatter | None]
CSSPair: TypeAlias = tuple[str, str | float]
CSSList: TypeAlias = list[CSSPair]
CSSProperties: TypeAlias = str | CSSList

class CSSDict(TypedDict):
    selector: str
    props: CSSProperties

class StyleExportDict(TypedDict, total=False):
    apply: Any
    table_attributes: Any
    table_styles: Any
    hide_index: bool
    hide_columns: bool
    hide_index_names: bool
    hide_column_names: bool
    css: dict[str, str | int]

CSSStyles: TypeAlias = list[CSSDict]
Subset: TypeAlias = _IndexSlice | slice | tuple[slice, ...] | list[HashableT] | Index

class StylerRenderer:
    loader: PackageLoader
    env: Environment
    template_html: Template
    template_html_table: Template
    template_html_style: Template
    template_latex: Template
    def format(
        self,
        formatter: ExtFormatter | None = None,
        subset: Subset | None = None,
        na_rep: str | None = None,
        precision: int | None = None,
        decimal: str = ".",
        thousands: str | None = None,
        escape: str | None = None,
        hyperlinks: Literal["html", "latex"] | None = None,
    ) -> Self: ...
    def format_index(
        self,
        formatter: ExtFormatter | None = None,
        axis: Axis = 0,
        level: Level | list[Level] | None = None,
        na_rep: str | None = None,
        precision: int | None = None,
        decimal: str = ".",
        thousands: str | None = None,
        escape: str | None = None,
        hyperlinks: Literal["html", "latex"] | None = None,
    ) -> Self: ...
    def relabel_index(
        self,
        labels: Sequence[str] | Index,
        axis: Axis = ...,
        level: Level | list[Level] | None = ...,
    ) -> Self: ...
    @property
    def columns(self) -> Index: ...
    @property
    def index(self) -> Index: ...
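Note (illustrative, not part of the diff): a minimal sketch of the Self-returning renderer methods, which is what makes Styler chains type-check:

import pandas as pd

df = pd.DataFrame({"v": [0.1234, 5.678]}, index=["r1", "r2"])
s = df.style.format("{:.2f}", na_rep="-")        # returns the Styler (Self)
s = s.relabel_index(["row 1", "row 2"], axis=0)  # changes display labels only
print(s.to_html())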
58
lib/python3.11/site-packages/pandas-stubs/io/html.pyi
Normal file
@@ -0,0 +1,58 @@
from collections.abc import (
    Callable,
    Hashable,
    Mapping,
    Sequence,
)
from re import Pattern
from typing import (
    Any,
    Literal,
)

from pandas.core.frame import DataFrame

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    DtypeBackend,
    FilePath,
    HashableT1,
    HashableT2,
    HashableT3,
    HashableT4,
    HashableT5,
    HTMLFlavors,
    ReadBuffer,
    StorageOptions,
)

def read_html(
    io: FilePath | ReadBuffer[str],
    *,
    match: str | Pattern = ...,
    flavor: HTMLFlavors | Sequence[HTMLFlavors] | None = ...,
    header: int | Sequence[int] | None = ...,
    index_col: int | Sequence[int] | list[HashableT1] | None = ...,
    skiprows: int | Sequence[int] | slice | None = ...,
    attrs: dict[str, str] | None = ...,
    parse_dates: (
        bool
        | Sequence[int]
        | list[HashableT2]  # Cannot be Sequence[Hashable] to prevent str
        | Sequence[Sequence[Hashable]]
        | dict[str, Sequence[int]]
        | dict[str, list[HashableT3]]
    ) = ...,
    thousands: str = ...,
    encoding: str | None = ...,
    decimal: str = ...,
    converters: Mapping[int | HashableT4, Callable[[str], Any]] | None = ...,
    na_values: (
        str | list[str] | dict[HashableT5, str] | dict[HashableT5, list[str]] | None
    ) = ...,
    keep_default_na: bool = ...,
    displayed_only: bool = ...,
    extract_links: Literal["header", "footer", "body", "all"] | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    storage_options: StorageOptions = ...,
) -> list[DataFrame]: ...
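Note (illustrative, not part of the diff): read_html always returns a list, one DataFrame per matching table; the URL here is hypothetical and an HTML parser (lxml or bs4+html5lib) is assumed:

import pandas as pd

tables = pd.read_html("https://example.com/page.html", match="Population")
first = tables[0]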
@@ -0,0 +1,7 @@
from pandas.io.json._json import (
    read_json as read_json,
)

# below are untyped imports so commented out
# to_json as to_json,; ujson_dumps as ujson_dumps,; ujson_loads as ujson_loads,
from pandas.io.json._table_schema import build_table_schema as build_table_schema
240
lib/python3.11/site-packages/pandas-stubs/io/json/_json.pyi
Normal file
@@ -0,0 +1,240 @@
from collections import abc
from collections.abc import Mapping
from types import TracebackType
from typing import (
    Generic,
    Literal,
    overload,
)

from pandas.core.frame import DataFrame
from pandas.core.series import Series

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    CompressionOptions,
    DtypeArg,
    DtypeBackend,
    FilePath,
    HashableT,
    JsonFrameOrient,
    JsonSeriesOrient,
    NDFrameT,
    ReadBuffer,
    StorageOptions,
    TimeUnit,
)

@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: JsonSeriesOrient | None = ...,
    typ: Literal["series"],
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: Literal[True],
    chunksize: int,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["ujson"] = ...,
) -> JsonReader[Series]: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[bytes],
    *,
    orient: JsonSeriesOrient | None = ...,
    typ: Literal["series"],
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: Literal[True],
    chunksize: int,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["pyarrow"],
) -> JsonReader[Series]: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[bytes],
    *,
    orient: JsonFrameOrient | None = ...,
    typ: Literal["frame"] = ...,
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: Literal[True],
    chunksize: int,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["ujson"] = ...,
) -> JsonReader[DataFrame]: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[bytes],
    *,
    orient: JsonFrameOrient | None = ...,
    typ: Literal["frame"] = ...,
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: Literal[True],
    chunksize: int,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["pyarrow"],
) -> JsonReader[DataFrame]: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: JsonSeriesOrient | None = ...,
    typ: Literal["series"],
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: bool = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["ujson"] = ...,
) -> Series: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[bytes],
    *,
    orient: JsonSeriesOrient | None = ...,
    typ: Literal["series"],
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: Literal[True],
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["pyarrow"],
) -> Series: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: JsonFrameOrient | None = ...,
    typ: Literal["frame"] = ...,
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: bool = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["ujson"] = ...,
) -> DataFrame: ...
@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[bytes],
    *,
    orient: JsonFrameOrient | None = ...,
    typ: Literal["frame"] = ...,
    dtype: bool | Mapping[HashableT, DtypeArg] | None = ...,
    convert_axes: bool | None = ...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: TimeUnit | None = ...,
    encoding: str | None = ...,
    encoding_errors: (
        Literal["strict", "ignore", "replace", "backslashreplace", "surrogateescape"]
        | None
    ) = ...,
    lines: Literal[True],
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    engine: Literal["pyarrow"],
) -> DataFrame: ...

class JsonReader(abc.Iterator, Generic[NDFrameT]):
    def read(self) -> NDFrameT: ...
    def close(self) -> None: ...
    def __iter__(self) -> JsonReader[NDFrameT]: ...
    def __next__(self) -> NDFrameT: ...
    def __enter__(self) -> JsonReader[NDFrameT]: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None: ...
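Note (illustrative, not part of the diff): a minimal sketch of the overload split; lines + chunksize selects the JsonReader overloads, otherwise a DataFrame (or, with typ="series", a Series) comes back directly:

import io
import pandas as pd

buf = io.StringIO('{"a": {"0": 1}, "b": {"0": 2}}')
df = pd.read_json(buf)  # typ="frame" default -> DataFrame

lines_buf = io.StringIO('{"a": 1}\n{"a": 2}\n')
with pd.read_json(lines_buf, lines=True, chunksize=1) as reader:  # -> JsonReader
    for chunk in reader:  # each chunk is a DataFrame
        print(chunk)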
@@ -0,0 +1,14 @@
from pandas import DataFrame

from pandas._typing import IgnoreRaise

def json_normalize(
    data: dict | list[dict],
    record_path: str | list | None = None,
    meta: str | list[str | list[str]] | None = None,
    meta_prefix: str | None = None,
    record_prefix: str | None = None,
    errors: IgnoreRaise = "raise",
    sep: str = ".",
    max_level: int | None = None,
) -> DataFrame: ...
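Note (illustrative, not part of the diff): a minimal json_normalize call flattening nested records; the data here is made up for the example:

import pandas as pd

data = [{"id": 1, "tags": [{"t": "a"}, {"t": "b"}]}]
flat = pd.json_normalize(
    data, record_path="tags", meta=["id"], record_prefix="tag."
)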
@@ -0,0 +1,13 @@
from pandas import (
    DataFrame,
    Series,
)

from pandas._typing import JSONSerializable

def build_table_schema(
    data: DataFrame | Series,
    index: bool = True,
    primary_key: bool | None = True,
    version: bool = True,
) -> dict[str, JSONSerializable]: ...
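Note (illustrative, not part of the diff): a minimal build_table_schema call producing a Table Schema dict for a frame:

import pandas as pd
from pandas.io.json._table_schema import build_table_schema

df = pd.DataFrame({"a": [1, 2]}, index=pd.Index([0, 1], name="idx"))
schema = build_table_schema(df, index=True, version=False)
# e.g. a dict with "fields" and "primaryKey" entries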
21
lib/python3.11/site-packages/pandas-stubs/io/orc.pyi
Normal file
@@ -0,0 +1,21 @@
from typing import Any

from pandas import DataFrame

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    DtypeBackend,
    FilePath,
    HashableT,
    ReadBuffer,
)

def read_orc(
    path: FilePath | ReadBuffer[bytes],
    columns: list[HashableT] | None = None,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = "numpy_nullable",
    # TODO type with the correct pyarrow types
    # filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem
    filesystem: Any | None = None,
    **kwargs: Any,
) -> DataFrame: ...
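Note (illustrative, not part of the diff): a minimal read_orc call; pyarrow is assumed installed and "data.orc" is a hypothetical path:

import pandas as pd

df = pd.read_orc("data.orc", columns=["a", "b"])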
22
lib/python3.11/site-packages/pandas-stubs/io/parquet.pyi
Normal file
@@ -0,0 +1,22 @@
from typing import Any

from pandas import DataFrame

from pandas._typing import (
    DtypeBackend,
    FilePath,
    ParquetEngine,
    ReadBuffer,
    StorageOptions,
)

def read_parquet(
    path: FilePath | ReadBuffer[bytes],
    engine: ParquetEngine = "auto",
    columns: list[str] | None = None,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend = "numpy_nullable",
    filesystem: Any = None,
    filters: list[tuple] | list[list[tuple]] | None = None,
    **kwargs: Any,
) -> DataFrame: ...
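Note (illustrative, not part of the diff): a minimal read_parquet call with row-group filters; pyarrow or fastparquet is assumed installed, and the path and filter values are hypothetical:

import pandas as pd

df = pd.read_parquet(
    "data.parquet",
    engine="auto",
    columns=["a", "b"],
    filters=[("year", ">=", 2020)],
)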
@@ -0,0 +1,6 @@
from pandas.io.parsers.readers import (
    TextFileReader as TextFileReader,
    read_csv as read_csv,
    read_fwf as read_fwf,
    read_table as read_table,
)
492
lib/python3.11/site-packages/pandas-stubs/io/parsers/readers.pyi
Normal file
@@ -0,0 +1,492 @@
from collections import (
    abc,
    defaultdict,
)
from collections.abc import (
    Callable,
    Hashable,
    Mapping,
    Sequence,
)
import csv
from types import TracebackType
from typing import (
    Any,
    Literal,
    overload,
)

from pandas.core.frame import DataFrame
from typing_extensions import Self

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    CompressionOptions,
    CSVEngine,
    CSVQuoting,
    DtypeArg,
    DtypeBackend,
    FilePath,
    HashableT,
    ListLikeHashable,
    ReadCsvBuffer,
    StorageOptions,
    UsecolsArgType,
)

from pandas.io.common import IOHandles

@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType[HashableT] = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: (
        Mapping[int | str, Callable[[str], Any]]
        | Mapping[int, Callable[[str], Any]]
        | Mapping[str, Callable[[str], Any]]
        | None
    ) = ...,
    true_values: list[str] | None = ...,
    false_values: list[str] | None = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | list[int]
        | list[str]
        | Sequence[Sequence[int]]
        | Mapping[str, Sequence[int | str]]
        | None
    ) = ...,
    keep_date_col: bool = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> TextFileReader: ...
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType[HashableT] = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: (
        Mapping[int | str, Callable[[str], Any]]
        | Mapping[int, Callable[[str], Any]]
        | Mapping[str, Callable[[str], Any]]
        | None
    ) = ...,
    true_values: list[str] | None = ...,
    false_values: list[str] | None = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | list[int]
        | list[str]
        | Sequence[Sequence[int]]
        | Mapping[str, Sequence[int | str]]
        | None
    ) = ...,
    keep_date_col: bool = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> TextFileReader: ...
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType[HashableT] = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: (
        Mapping[int | str, Callable[[str], Any]]
        | Mapping[int, Callable[[str], Any]]
        | Mapping[str, Callable[[str], Any]]
        | None
    ) = ...,
    true_values: list[str] | None = ...,
    false_values: list[str] | None = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | list[int]
        | list[str]
        | Sequence[Sequence[int]]
        | Mapping[str, Sequence[int | str]]
        | None
    ) = ...,
    keep_date_col: bool = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> DataFrame: ...
||||
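
A minimal sketch of how these overloads resolve for callers (the file name data.csv is only illustrative): an int chunksize, or iterator=True, selects the TextFileReader overloads, while the defaults fall through to the DataFrame one.

    import pandas as pd

    df = pd.read_csv("data.csv")  # chunksize=None, iterator=False -> DataFrame
    reader = pd.read_csv("data.csv", chunksize=1000)  # int chunksize -> TextFileReader
    for chunk in reader:  # each chunk is a DataFrame
        print(len(chunk))
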
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType[HashableT] = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: (
        Mapping[int | str, Callable[[str], Any]]
        | Mapping[int, Callable[[str], Any]]
        | Mapping[str, Callable[[str], Any]]
        | None
    ) = ...,
    true_values: list[str] | None = ...,
    false_values: list[str] | None = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | list[int]
        | list[str]
        | Sequence[Sequence[int]]
        | Mapping[str, Sequence[int | str]]
        | None
    ) = ...,
    infer_datetime_format: bool = ...,
    keep_date_col: bool = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
) -> TextFileReader: ...
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType[HashableT] = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: (
        Mapping[int | str, Callable[[str], Any]]
        | Mapping[int, Callable[[str], Any]]
        | Mapping[str, Callable[[str], Any]]
        | None
    ) = ...,
    true_values: list[str] | None = ...,
    false_values: list[str] | None = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | list[int]
        | list[str]
        | Sequence[Sequence[int]]
        | Mapping[str, Sequence[int | str]]
        | None
    ) = ...,
    infer_datetime_format: bool = ...,
    keep_date_col: bool = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
) -> TextFileReader: ...
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None = ...,
    delimiter: str | None = ...,
    header: int | Sequence[int] | Literal["infer"] | None = ...,
    names: ListLikeHashable | None = ...,
    index_col: int | str | Sequence[str | int] | Literal[False] | None = ...,
    usecols: UsecolsArgType[HashableT] = ...,
    dtype: DtypeArg | defaultdict | None = ...,
    engine: CSVEngine | None = ...,
    converters: (
        Mapping[int | str, Callable[[str], Any]]
        | Mapping[int, Callable[[str], Any]]
        | Mapping[str, Callable[[str], Any]]
        | None
    ) = ...,
    true_values: list[str] | None = ...,
    false_values: list[str] | None = ...,
    skipinitialspace: bool = ...,
    skiprows: int | Sequence[int] | Callable[[int], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: (
        bool
        | list[int]
        | list[str]
        | Sequence[Sequence[int]]
        | Mapping[str, Sequence[int | str]]
        | None
    ) = ...,
    infer_datetime_format: bool = ...,
    keep_date_col: bool = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: CSVQuoting = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines: (
        Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None]
    ) = ...,
    delim_whitespace: bool = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy", "round_trip"] | None = ...,
    storage_options: StorageOptions | None = ...,
) -> DataFrame: ...
@overload
def read_fwf(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ...,
    widths: Sequence[int] | None = ...,
    infer_nrows: int = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    **kwds: Any,
) -> TextFileReader: ...
@overload
def read_fwf(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ...,
    widths: Sequence[int] | None = ...,
    infer_nrows: int = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    iterator: bool = ...,
    chunksize: int,
    **kwds: Any,
) -> TextFileReader: ...
@overload
def read_fwf(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    colspecs: Sequence[tuple[int, int]] | Literal["infer"] | None = ...,
    widths: Sequence[int] | None = ...,
    infer_nrows: int = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    **kwds: Any,
) -> DataFrame: ...
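
For read_fwf the same iterator/chunksize split applies; a small usage sketch (the file name and column layout are only illustrative):

    import pandas as pd

    # colspecs lists half-open (start, end) column intervals; widths is the
    # shorthand alternative that pandas expands to colspecs internally.
    df = pd.read_fwf("fixed.txt", colspecs=[(0, 6), (6, 20)], names=["id", "name"])
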
class TextFileReader(abc.Iterator):
    engine: CSVEngine
    orig_options: Mapping[str, Any]
    chunksize: int | None
    nrows: int | None
    squeeze: bool
    handles: IOHandles | None
    def __init__(
        self,
        f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list,
        engine: CSVEngine | None = ...,
        **kwds: Any,
    ) -> None: ...
    def close(self) -> None: ...
    def read(self, nrows: int | None = ...) -> DataFrame: ...
    def get_chunk(self, size: int | None = ...) -> DataFrame: ...
    def __next__(self) -> DataFrame: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None: ...
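
TextFileReader is both an iterator and a context manager, so chunked reads can be scoped with `with`; a sketch (big.csv is illustrative):

    import pandas as pd

    with pd.read_csv("big.csv", chunksize=10_000) as reader:
        head = reader.get_chunk(100)  # pull an explicitly sized chunk
        rest = reader.read()          # read all remaining rows at once
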
14
lib/python3.11/site-packages/pandas-stubs/io/pickle.pyi
Normal file
@ -0,0 +1,14 @@
from typing import Any

from pandas._typing import (
    CompressionOptions,
    FilePath,
    ReadPickleBuffer,
    StorageOptions,
)

def read_pickle(
    filepath_or_buffer: FilePath | ReadPickleBuffer,
    compression: CompressionOptions = "infer",
    storage_options: StorageOptions = None,
) -> Any: ...
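
Because pickles can hold arbitrary objects, read_pickle is typed as Any and callers narrow the result themselves; a sketch (frame.pkl is illustrative):

    import pandas as pd

    obj = pd.read_pickle("frame.pkl")  # static type: Any
    if isinstance(obj, pd.DataFrame):  # narrow before using DataFrame methods
        print(obj.shape)
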
230
lib/python3.11/site-packages/pandas-stubs/io/pytables.pyi
Normal file
@ -0,0 +1,230 @@
from collections.abc import (
    Generator,
    Iterator,
    Sequence,
)
from types import TracebackType
from typing import (
    Any,
    Literal,
    overload,
)

from pandas import (
    DataFrame,
    Series,
)
from pandas.core.computation.pytables import PyTablesExpr
from pandas.core.generic import NDFrame
from typing_extensions import Self

from pandas._typing import (
    FilePath,
    HashableT,
    HashableT1,
    HashableT2,
    HashableT3,
    HDFCompLib,
)

Term = PyTablesExpr

@overload
def read_hdf(
    path_or_buf: FilePath | HDFStore,
    key: Any | None = ...,
    mode: Literal["r", "r+", "a"] = ...,
    errors: Literal[
        "strict",
        "ignore",
        "replace",
        "surrogateescape",
        "xmlcharrefreplace",
        "backslashreplace",
        "namereplace",
    ] = ...,
    where: str | Term | Sequence[Term] | None = ...,
    start: int | None = ...,
    stop: int | None = ...,
    columns: list[HashableT] | None = ...,
    *,
    iterator: Literal[True],
    chunksize: int | None = ...,
    **kwargs: Any,
) -> TableIterator: ...
@overload
def read_hdf(
    path_or_buf: FilePath | HDFStore,
    key: Any | None = ...,
    mode: Literal["r", "r+", "a"] = ...,
    errors: Literal[
        "strict",
        "ignore",
        "replace",
        "surrogateescape",
        "xmlcharrefreplace",
        "backslashreplace",
        "namereplace",
    ] = ...,
    where: str | Term | Sequence[Term] | None = ...,
    start: int | None = ...,
    stop: int | None = ...,
    columns: list[HashableT] | None = ...,
    iterator: bool = ...,
    *,
    chunksize: int,
    **kwargs: Any,
) -> TableIterator: ...
@overload
def read_hdf(
    path_or_buf: FilePath | HDFStore,
    key: Any | None = ...,
    mode: Literal["r", "r+", "a"] = ...,
    errors: Literal[
        "strict",
        "ignore",
        "replace",
        "surrogateescape",
        "xmlcharrefreplace",
        "backslashreplace",
        "namereplace",
    ] = ...,
    where: str | Term | Sequence[Term] | None = ...,
    start: int | None = ...,
    stop: int | None = ...,
    columns: list[HashableT] | None = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    **kwargs: Any,
) -> DataFrame | Series: ...
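
As with the CSV readers, iterator=True or an int chunksize selects the TableIterator overloads, while the default call returns the stored object directly. A sketch (store.h5 and the key are illustrative; chunked reads assume the object was written in table format):

    import pandas as pd

    df = pd.read_hdf("store.h5", key="frame")  # -> DataFrame | Series
    for chunk in pd.read_hdf("store.h5", key="frame", iterator=True, chunksize=500):
        print(len(chunk))  # each chunk is a slice of the stored object
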
class HDFStore:
    def __init__(
        self,
        path,
        mode: Literal["a", "w", "r", "r+"] = ...,
        complevel: int | None = ...,
        complib: HDFCompLib | None = ...,
        fletcher32: bool = ...,
        **kwargs,
    ) -> None: ...
    def __fspath__(self) -> str: ...
    def __getitem__(self, key: str) -> DataFrame | Series: ...
    def __setitem__(self, key: str, value: DataFrame | Series) -> None: ...
    def __delitem__(self, key: str) -> None: ...
    def __getattr__(self, name: str) -> DataFrame | Series: ...
    def __contains__(self, key: str) -> bool: ...
    def __len__(self) -> int: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None: ...
    def keys(self, include="pandas") -> list[str]: ...
    def __iter__(self) -> Iterator[str]: ...
    def close(self) -> None: ...
    @property
    def is_open(self) -> bool: ...
    def get(self, key: str) -> DataFrame | Series: ...
    @overload
    def select(
        self,
        key: str,
        where: str | Term | Sequence[Term] | None = ...,
        start: int | None = ...,
        stop: int | None = ...,
        columns: list[HashableT] | None = ...,
        *,
        iterator: Literal[True],
        chunksize: int | None = ...,
        auto_close: bool = ...,
    ) -> TableIterator: ...
    @overload
    def select(
        self,
        key: str,
        where: str | Term | Sequence[Term] | None = ...,
        start: int | None = ...,
        stop: int | None = ...,
        columns: list[HashableT] | None = ...,
        iterator: bool = ...,
        *,
        chunksize: int,
        auto_close: bool = ...,
    ) -> TableIterator: ...
    @overload
    def select(
        self,
        key: str,
        where: str | Term | Sequence[Term] | None = ...,
        start: int | None = ...,
        stop: int | None = ...,
        columns: list[HashableT] | None = ...,
        iterator: Literal[False] = ...,
        chunksize: None = ...,
        auto_close: bool = ...,
    ) -> DataFrame | Series: ...
    def put(
        self,
        key: str,
        value: NDFrame,
        format: Literal["t", "table", "f", "fixed"] | None = None,
        index: bool = True,
        append: bool = False,
        complib: HDFCompLib | None = None,
        complevel: int | None = None,
        min_itemsize: int | dict[HashableT1, int] | None = None,
        nan_rep: str | None = None,
        data_columns: Literal[True] | list[HashableT2] | None = None,
        encoding: str | None = None,
        errors: Literal[
            "strict",
            "ignore",
            "replace",
            "surrogateescape",
            "xmlcharrefreplace",
            "backslashreplace",
            "namereplace",
        ] = "strict",
        track_times: bool = True,
        dropna: bool = False,
    ) -> None: ...
    def append(
        self,
        key: str,
        value: NDFrame,
        format: Literal["t", "table", "f", "fixed"] | None = None,
        axes: int | None = None,
        index: bool = True,
        append: bool = True,
        complib: HDFCompLib | None = None,
        complevel: int | None = None,
        columns: list[HashableT1] | None = None,
        min_itemsize: int | dict[HashableT2, int] | None = None,
        nan_rep: str | None = None,
        chunksize: int | None = None,
        expectedrows: int | None = None,
        dropna: bool | None = False,
        data_columns: Literal[True] | list[HashableT3] | None = None,
        encoding: str | None = None,
        errors: Literal[
            "strict",
            "ignore",
            "replace",
            "surrogateescape",
            "xmlcharrefreplace",
            "backslashreplace",
            "namereplace",
        ] = "strict",
    ) -> None: ...
    def groups(self) -> list: ...
    def walk(
        self, where: str = "/"
    ) -> Generator[tuple[str, list, list[str]], None, None]: ...
    def info(self) -> str: ...

class TableIterator:
    def __iter__(self) -> Iterator[DataFrame | Series]: ...
    def close(self) -> None: ...
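
A short HDFStore sketch tying the pieces together (store.h5 is illustrative): put() with format="table" and data_columns=True is what makes where= queries in select() possible.

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3]})
    with pd.HDFStore("store.h5", mode="a") as store:
        store.put("frame", df, format="table", data_columns=True)
        subset = store.select("frame", where="a > 1")  # -> DataFrame | Series
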
@ -0,0 +1 @@
from pandas.io.sas.sasreader import read_sas as read_sas
@ -0,0 +1,8 @@
from pandas import DataFrame

from pandas.io.sas.sasreader import ReaderBase

class SAS7BDATReader(ReaderBase):
    def close(self) -> None: ...
    def __next__(self) -> DataFrame: ...
    def read(self, nrows: int | None = ...) -> DataFrame: ...
@ -0,0 +1,8 @@
import pandas as pd

from pandas.io.sas.sasreader import ReaderBase

class XportReader(ReaderBase):
    def close(self) -> None: ...
    def __next__(self) -> pd.DataFrame: ...
    def read(self, nrows: int | None = ...) -> pd.DataFrame: ...
107
lib/python3.11/site-packages/pandas-stubs/io/sas/sasreader.pyi
Normal file
@ -0,0 +1,107 @@
from abc import (
    ABCMeta,
    abstractmethod,
)
from collections.abc import Hashable
from typing import (
    Literal,
    overload,
)

from pandas import DataFrame
from typing_extensions import Self

from pandas._typing import (
    CompressionOptions as CompressionOptions,
    FilePath as FilePath,
    ReadBuffer,
)

from pandas.io.sas.sas7bdat import SAS7BDATReader
from pandas.io.sas.sas_xport import XportReader

class ReaderBase(metaclass=ABCMeta):
    @abstractmethod
    def read(self, nrows: int | None = ...) -> DataFrame: ...
    @abstractmethod
    def close(self) -> None: ...
    def __enter__(self) -> Self: ...
    def __exit__(self, exc_type, exc_value, traceback) -> None: ...

@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: Literal["sas7bdat"],
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: int,
    iterator: bool = ...,
    compression: CompressionOptions = ...,
) -> SAS7BDATReader: ...
@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: Literal["xport"],
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: int,
    iterator: bool = ...,
    compression: CompressionOptions = ...,
) -> XportReader: ...
@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: None = ...,
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: int,
    iterator: bool = ...,
    compression: CompressionOptions = ...,
) -> XportReader | SAS7BDATReader: ...
@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: Literal["sas7bdat"],
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: int | None = ...,
    iterator: Literal[True],
    compression: CompressionOptions = ...,
) -> SAS7BDATReader: ...
@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: Literal["xport"],
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: int | None = ...,
    iterator: Literal[True],
    compression: CompressionOptions = ...,
) -> XportReader: ...
@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: None = ...,
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: int | None = ...,
    iterator: Literal[True],
    compression: CompressionOptions = ...,
) -> XportReader | SAS7BDATReader: ...
@overload
def read_sas(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    format: Literal["xport", "sas7bdat"] | None = ...,
    index: Hashable | None = ...,
    encoding: str | None = ...,
    chunksize: None = ...,
    iterator: Literal[False] = ...,
    compression: CompressionOptions = ...,
) -> DataFrame: ...
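
With format=None pandas infers sas7bdat vs. xport from the file extension, which is why the chunked overloads can only promise XportReader | SAS7BDATReader. A sketch (the file name is illustrative):

    import pandas as pd

    df = pd.read_sas("airline.sas7bdat")  # format inferred -> DataFrame
    with pd.read_sas("airline.sas7bdat", chunksize=1000) as reader:
        chunk = reader.read(100)  # ReaderBase is a context manager per the stub
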
15
lib/python3.11/site-packages/pandas-stubs/io/spss.pyi
Normal file
@ -0,0 +1,15 @@
from pandas.core.frame import DataFrame

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    DtypeBackend,
    FilePath,
    HashableT,
)

def read_spss(
    path: FilePath,
    usecols: list[HashableT] | None = None,
    convert_categoricals: bool = True,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = "numpy_nullable",
) -> DataFrame: ...
202
lib/python3.11/site-packages/pandas-stubs/io/sql.pyi
Normal file
@ -0,0 +1,202 @@
from collections.abc import (
    Callable,
    Generator,
    Iterable,
    Mapping,
)
import sqlite3
from typing import (
    Any,
    Literal,
    overload,
)

from pandas.core.frame import DataFrame
import sqlalchemy.engine
from sqlalchemy.orm import FromStatement
import sqlalchemy.sql.expression
from typing_extensions import TypeAlias

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    DtypeArg,
    DtypeBackend,
    Scalar,
    npt,
)

_SQLConnection: TypeAlias = str | sqlalchemy.engine.Connectable | sqlite3.Connection

_SQLStatement: TypeAlias = (
    str
    | sqlalchemy.sql.expression.Selectable
    | sqlalchemy.sql.expression.TextClause
    | sqlalchemy.sql.Select
    | FromStatement
    | sqlalchemy.sql.expression.UpdateBase
)

@overload
def read_sql_table(
    table_name: str,
    con: _SQLConnection,
    schema: str | None = ...,
    index_col: str | list[str] | None = ...,
    coerce_float: bool = ...,
    parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
    columns: list[str] | None = ...,
    *,
    chunksize: int,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> Generator[DataFrame, None, None]: ...
@overload
def read_sql_table(
    table_name: str,
    con: _SQLConnection,
    schema: str | None = ...,
    index_col: str | list[str] | None = ...,
    coerce_float: bool = ...,
    parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
    columns: list[str] | None = ...,
    chunksize: None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> DataFrame: ...
@overload
def read_sql_query(
    sql: _SQLStatement,
    con: _SQLConnection,
    index_col: str | list[str] | None = ...,
    coerce_float: bool = ...,
    params: (
        list[Scalar]
        | tuple[Scalar, ...]
        | tuple[tuple[Scalar, ...], ...]
        | Mapping[str, Scalar]
        | Mapping[str, tuple[Scalar, ...]]
        | None
    ) = ...,
    parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
    *,
    chunksize: int,
    dtype: DtypeArg | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> Generator[DataFrame, None, None]: ...
@overload
def read_sql_query(
    sql: _SQLStatement,
    con: _SQLConnection,
    index_col: str | list[str] | None = ...,
    coerce_float: bool = ...,
    params: (
        list[Scalar]
        | tuple[Scalar, ...]
        | tuple[tuple[Scalar, ...], ...]
        | Mapping[str, Scalar]
        | Mapping[str, tuple[Scalar, ...]]
        | None
    ) = ...,
    parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
    chunksize: None = ...,
    dtype: DtypeArg | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> DataFrame: ...
@overload
def read_sql(
    sql: _SQLStatement,
    con: _SQLConnection,
    index_col: str | list[str] | None = ...,
    coerce_float: bool = ...,
    params: (
        list[Scalar]
        | tuple[Scalar, ...]
        | tuple[tuple[Scalar, ...], ...]
        | Mapping[str, Scalar]
        | Mapping[str, tuple[Scalar, ...]]
        | None
    ) = ...,
    parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
    columns: list[str] | None = ...,
    *,
    chunksize: int,
    dtype: DtypeArg | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> Generator[DataFrame, None, None]: ...
@overload
def read_sql(
    sql: _SQLStatement,
    con: _SQLConnection,
    index_col: str | list[str] | None = ...,
    coerce_float: bool = ...,
    params: (
        list[Scalar]
        | tuple[Scalar, ...]
        | tuple[tuple[Scalar, ...], ...]
        | Mapping[str, Scalar]
        | Mapping[str, tuple[Scalar, ...]]
        | None
    ) = ...,
    parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ...,
    columns: list[str] | None = ...,
    chunksize: None = ...,
    dtype: DtypeArg | None = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> DataFrame: ...
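
An int chunksize turns each of these readers into a generator of DataFrames; with chunksize=None they return a single DataFrame. A self-contained sqlite3 sketch:

    import sqlite3

    import pandas as pd

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE t (x INTEGER)")
    con.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])

    df = pd.read_sql("SELECT * FROM t", con)  # chunksize=None -> DataFrame
    for chunk in pd.read_sql("SELECT * FROM t", con, chunksize=2):
        print(len(chunk))  # int chunksize -> Generator[DataFrame, None, None]
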
class PandasSQL:
    def to_sql(
        self,
        frame: DataFrame,
        name: str,
        if_exists: Literal["fail", "replace", "append"] = ...,
        index: bool = ...,
        index_label=...,
        schema: str | None = ...,
        chunksize=...,
        dtype: DtypeArg | None = ...,
        method: (
            Literal["multi"]
            | Callable[[SQLTable, Any, list[str], Iterable], int | None]
            | None
        ) = ...,
        engine: str = ...,
        **engine_kwargs: dict[str, Any] | None,
    ) -> int | None: ...

class SQLTable:
    name: str
    pd_sql: PandasSQL  # pandas SQL interface
    prefix: str
    frame: DataFrame | None
    index: list[str]
    schema: str
    if_exists: Literal["fail", "replace", "append"]
    keys: list[str]
    dtype: DtypeArg | None
    table: Any  # sqlalchemy.Table
    def __init__(
        self,
        name: str,
        pandas_sql_engine: PandasSQL,
        frame: DataFrame | None = ...,
        index: bool | str | list[str] | None = ...,
        if_exists: Literal["fail", "replace", "append"] = ...,
        prefix: str = ...,
        index_label: str | list[str] | None = ...,
        schema: str | None = ...,
        keys: str | list[str] | None = ...,
        dtype: DtypeArg | None = ...,
    ) -> None: ...
    def exists(self) -> bool: ...
    def sql_schema(self) -> str: ...
    def create(self) -> None: ...
    def insert_data(self) -> tuple[list[str], list[npt.NDArray]]: ...
    def insert(
        self, chunksize: int | None = ..., method: str | None = ...
    ) -> int | None: ...
    def read(
        self,
        coerce_float: bool = ...,
        parse_dates: bool | list[str] | None = ...,
        columns: list[str] | None = ...,
        chunksize: int | None = ...,
    ) -> DataFrame | Generator[DataFrame, None, None]: ...
125
lib/python3.11/site-packages/pandas-stubs/io/stata.pyi
Normal file
@ -0,0 +1,125 @@
from collections import abc
from collections.abc import Sequence
import datetime
from io import BytesIO
from types import TracebackType
from typing import (
    Literal,
    overload,
)

from pandas.core.frame import DataFrame
from typing_extensions import Self

from pandas._typing import (
    CompressionOptions,
    FilePath,
    HashableT,
    HashableT1,
    HashableT2,
    HashableT3,
    ReadBuffer,
    StataDateFormat,
    StorageOptions,
    WriteBuffer,
)

@overload
def read_stata(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    convert_dates: bool = ...,
    convert_categoricals: bool = ...,
    index_col: str | None = ...,
    convert_missing: bool = ...,
    preserve_dtypes: bool = ...,
    columns: list[HashableT] | None = ...,
    order_categoricals: bool = ...,
    chunksize: int | None = ...,
    iterator: Literal[True],
    compression: CompressionOptions = ...,
    storage_options: StorageOptions = ...,
) -> StataReader: ...
@overload
def read_stata(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    convert_dates: bool = ...,
    convert_categoricals: bool = ...,
    index_col: str | None = ...,
    convert_missing: bool = ...,
    preserve_dtypes: bool = ...,
    columns: list[HashableT] | None = ...,
    order_categoricals: bool = ...,
    chunksize: int,
    iterator: bool = ...,
    compression: CompressionOptions = ...,
    storage_options: StorageOptions = ...,
) -> StataReader: ...
@overload
def read_stata(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    convert_dates: bool = ...,
    convert_categoricals: bool = ...,
    index_col: str | None = ...,
    convert_missing: bool = ...,
    preserve_dtypes: bool = ...,
    columns: list[HashableT] | None = ...,
    order_categoricals: bool = ...,
    chunksize: None = ...,
    iterator: Literal[False] = ...,
    compression: CompressionOptions = ...,
    storage_options: StorageOptions = ...,
) -> DataFrame: ...

class StataParser:
    def __init__(self) -> None: ...

class StataReader(StataParser, abc.Iterator):
    col_sizes: list[int] = ...
    path_or_buf: BytesIO = ...
    def __init__(
        self,
        path_or_buf: FilePath | ReadBuffer[bytes],
        convert_dates: bool = ...,
        convert_categoricals: bool = ...,
        index_col: str | None = ...,
        convert_missing: bool = ...,
        preserve_dtypes: bool = ...,
        columns: Sequence[str] | None = ...,
        order_categoricals: bool = ...,
        chunksize: int | None = ...,
        compression: CompressionOptions = ...,
        storage_options: StorageOptions = ...,
    ) -> None: ...
    def __enter__(self) -> Self: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None: ...
    def __next__(self) -> DataFrame: ...
    @property
    def data_label(self) -> str: ...
    def variable_labels(self) -> dict[str, str]: ...
    def value_labels(self) -> dict[str, dict[float, str]]: ...
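
A chunked-read sketch (survey.dta is illustrative): the reader exposes Stata metadata alongside iteration and supports the with statement via the __enter__/__exit__ methods above.

    import pandas as pd

    with pd.read_stata("survey.dta", chunksize=1000) as reader:  # -> StataReader
        labels = reader.variable_labels()  # column name -> label text
        first = next(reader)               # each step yields a DataFrame
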
class StataWriter(StataParser):
    def __init__(
        self,
        fname: FilePath | WriteBuffer[bytes],
        data: DataFrame,
        convert_dates: dict[HashableT1, StataDateFormat] | None = ...,
        write_index: bool = ...,
        byteorder: str | None = ...,
        time_stamp: datetime.datetime | None = ...,
        data_label: str | None = ...,
        variable_labels: dict[HashableT2, str] | None = ...,
        compression: CompressionOptions = ...,
        storage_options: StorageOptions = ...,
        *,
        value_labels: dict[HashableT3, dict[float, str]] | None = ...,
    ) -> None: ...
    def write_file(self) -> None: ...
37
lib/python3.11/site-packages/pandas-stubs/io/xml.pyi
Normal file
@ -0,0 +1,37 @@
from collections.abc import Sequence

from pandas.core.frame import DataFrame

from pandas._libs.lib import _NoDefaultDoNotUse
from pandas._typing import (
    CompressionOptions,
    ConvertersArg,
    DtypeArg,
    DtypeBackend,
    FilePath,
    ParseDatesArg,
    ReadBuffer,
    StorageOptions,
    XMLParsers,
)

def read_xml(
    path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
    *,
    xpath: str = ...,
    namespaces: dict[str, str] | None = ...,
    elems_only: bool = ...,
    attrs_only: bool = ...,
    names: Sequence[str] | None = ...,
    dtype: DtypeArg | None = ...,
    converters: ConvertersArg | None = ...,
    parse_dates: ParseDatesArg | None = ...,
    # encoding cannot be None for lxml and StringIO input
    encoding: str | None = ...,
    parser: XMLParsers = ...,
    stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = ...,
    iterparse: dict[str, list[str]] | None = ...,
    compression: CompressionOptions = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ...,
) -> DataFrame: ...
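
A minimal read_xml sketch, assuming lxml is installed (the default parser); the document is inline, so it is wrapped in StringIO because recent pandas versions deprecate passing the XML as a plain string:

    from io import StringIO

    import pandas as pd

    xml = """<rows>
      <row><name>a</name><value>1</value></row>
      <row><name>b</name><value>2</value></row>
    </rows>"""
    df = pd.read_xml(StringIO(xml), xpath="//row")  # one row per matched node
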