This commit is contained in:
2025-09-07 22:09:54 +02:00
parent e1b817252c
commit 2fc0d000b6
7796 changed files with 2159515 additions and 933 deletions

View File

@@ -0,0 +1,10 @@
import collections
import numpy as np
def test_no_duplicates_in_np__all__():
    # Regression test for gh-10198: every name exported through
    # np.__all__ must appear exactly once.
    counts = collections.Counter(np.__all__)
    duplicated = [name for name, count in counts.items() if count > 1]
    assert duplicated == []

View File

@@ -0,0 +1,48 @@
import importlib
import importlib.metadata
import os
import pathlib
import subprocess
import pytest
import numpy as np
import numpy._core.include
import numpy._core.lib.pkgconfig
from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT
INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include'
PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig'
@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed")
@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess")
class TestNumpyConfig:
    """Exercise the ``numpy-config`` command line tool."""

    def check_numpyconfig(self, arg):
        # Run `numpy-config <arg>`, fail loudly on a non-zero exit status,
        # and hand back the stripped stdout.
        proc = subprocess.run(['numpy-config', arg],
                              capture_output=True, text=True)
        proc.check_returncode()
        return proc.stdout.strip()

    def test_configtool_version(self):
        # --version must report exactly the running numpy's version.
        assert self.check_numpyconfig('--version') == np.__version__

    def test_configtool_cflags(self):
        # --cflags must mention this install's include directory.
        cflags = self.check_numpyconfig('--cflags')
        assert f'-I{os.fspath(INCLUDE_DIR)}' in cflags

    def test_configtool_pkgconfigdir(self):
        # --pkgconfigdir must point at the bundled pkg-config directory.
        reported = self.check_numpyconfig('--pkgconfigdir')
        assert pathlib.Path(reported) == PKG_CONFIG_DIR
@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints")
def test_pkg_config_entrypoint():
    # Exactly one 'pkg_config' entry point named 'numpy' must be registered,
    # and it has to resolve to numpy's bundled pkg-config shim module.
    matches = importlib.metadata.entry_points(group='pkg_config', name='numpy')
    (entrypoint,) = matches
    assert entrypoint.value == numpy._core.lib.pkgconfig.__name__
@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed")
@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc")
def test_pkg_config_config_exists():
    # The pkg-config metadata file must ship with the installed package.
    pc_file = PKG_CONFIG_DIR / 'numpy.pc'
    assert pc_file.is_file()

View File

@@ -0,0 +1,377 @@
import sys
import sysconfig
import weakref
from pathlib import Path
import pytest
import numpy as np
from numpy.ctypeslib import as_array, load_library, ndpointer
from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises
# ctypes is optional; when it is missing, the tests below are skipped via
# their class-level skipif markers.
try:
    import ctypes
except ImportError:
    ctypes = None
else:
    cdll = None
    test_cdll = None
    # On debug builds of CPython (detected via sys.gettotalrefcount, which
    # only exists there) the extension modules may carry a `_d` suffix;
    # try those first, silently falling back on failure.
    if hasattr(sys, 'gettotalrefcount'):
        try:
            cdll = load_library(
                '_multiarray_umath_d', np._core._multiarray_umath.__file__
            )
        except OSError:
            pass
        try:
            test_cdll = load_library(
                '_multiarray_tests', np._core._multiarray_tests.__file__
            )
        except OSError:
            pass
    # Regular (non-debug) extension module names.
    if cdll is None:
        cdll = load_library(
            '_multiarray_umath', np._core._multiarray_umath.__file__)
    if test_cdll is None:
        test_cdll = load_library(
            '_multiarray_tests', np._core._multiarray_tests.__file__
        )
    # C function used by TestNdpointerCFunc below; judging from those
    # tests it forwards its pointer argument back to the caller.
    c_forward_pointer = test_cdll.forward_pointer
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
class TestLoadLibrary:
def test_basic(self):
loader_path = np._core._multiarray_umath.__file__
out1 = load_library('_multiarray_umath', loader_path)
out2 = load_library(Path('_multiarray_umath'), loader_path)
out3 = load_library('_multiarray_umath', Path(loader_path))
out4 = load_library(b'_multiarray_umath', loader_path)
assert isinstance(out1, ctypes.CDLL)
assert out1 is out2 is out3 is out4
def test_basic2(self):
# Regression for #801: load_library with a full library name
# (including extension) does not work.
try:
so_ext = sysconfig.get_config_var('EXT_SUFFIX')
load_library(f'_multiarray_umath{so_ext}',
np._core._multiarray_umath.__file__)
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
class TestNdpointer:
    """Tests for the restrictions enforced by numpy.ctypeslib.ndpointer."""

    def test_dtype(self):
        # Scalar types and dtype strings are both accepted specs.
        for spec in (np.intc, '<i4'):
            restricted = ndpointer(dtype=spec)
            assert_(restricted.from_param(np.array([1], spec)))
        # An explicit byte order must be honoured; the opposite order
        # has to be rejected.
        big_endian = np.dtype('>i4')
        restricted = ndpointer(dtype=big_endian)
        restricted.from_param(np.array([1], big_endian))
        assert_raises(TypeError, restricted.from_param,
                      np.array([1], big_endian.newbyteorder('swap')))
        # Structured dtypes: an equal-but-distinct dtype object is just
        # as acceptable as the original instance.
        descr = {'names': ['x', 'y'], 'formats': [np.intc, np.float64]}
        record_dt = np.dtype(descr)
        restricted = ndpointer(dtype=record_dt)
        assert_(restricted.from_param(np.zeros((10,), record_dt)))
        restricted = ndpointer(dtype=np.dtype(descr))
        assert_(restricted.from_param(np.zeros((10,), record_dt)))
        # An aligned variant only matches when alignment does not change
        # the itemsize.
        aligned_dt = np.dtype(descr, align=True)
        if record_dt.itemsize == aligned_dt.itemsize:
            assert_(restricted.from_param(np.zeros((10,), aligned_dt)))
        else:
            assert_raises(TypeError, restricted.from_param,
                          np.zeros((10,), aligned_dt))

    def test_ndim(self):
        # Dimensionality must match exactly when ndim is constrained.
        zero_d = ndpointer(ndim=0)
        assert_(zero_d.from_param(np.array(1)))
        assert_raises(TypeError, zero_d.from_param, np.array([1]))
        one_d = ndpointer(ndim=1)
        assert_raises(TypeError, one_d.from_param, np.array(1))
        assert_(one_d.from_param(np.array([1])))
        two_d = ndpointer(ndim=2)
        assert_(two_d.from_param(np.array([[1]])))

    def test_shape(self):
        # Shape must match exactly when constrained, including ().
        fixed = ndpointer(shape=(1, 2))
        assert_(fixed.from_param(np.array([[1, 2]])))
        assert_raises(TypeError, fixed.from_param, np.array([[1], [2]]))
        scalar_shaped = ndpointer(shape=())
        assert_(scalar_shaped.from_param(np.array(1)))

    def test_flags(self):
        # Flag requirements may be spelled by name or by flag number.
        fortran_arr = np.array([[1, 2], [3, 4]], order='F')
        assert_(ndpointer(flags='FORTRAN').from_param(fortran_arr))
        assert_raises(TypeError,
                      ndpointer(flags='CONTIGUOUS').from_param, fortran_arr)
        by_number = ndpointer(flags=fortran_arr.flags.num)
        assert_(by_number.from_param(fortran_arr))
        assert_raises(TypeError, by_number.from_param,
                      np.array([[1, 2], [3, 4]]))

    def test_cache(self):
        # Identical specs must return the identical (cached) type object.
        assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
        # shapes are normalized
        assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
        # 1.12 <= v < 1.16 had a bug that made these fail
        assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
        assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
@pytest.mark.skipif(ctypes is None,
                    reason="ctypes not available on this python installation")
class TestNdpointerCFunc:
    """ndpointer used as argtype/restype of an actual C function."""

    def test_arguments(self):
        """ Test that arguments are coerced from arrays """
        c_forward_pointer.restype = ctypes.c_void_p
        c_forward_pointer.argtypes = (ndpointer(ndim=2),)
        c_forward_pointer(np.zeros((2, 3)))
        # too many dimensions
        assert_raises(
            ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))

    @pytest.mark.parametrize(
        'dt', [
            float,
            np.dtype({
                'formats': ['<i4', '<i4'],
                'names': ['a', 'b'],
                'offsets': [0, 2],
                'itemsize': 6
            })
        ], ids=[
            'float',
            'overlapping-fields'
        ]
    )
    def test_return(self, dt):
        """ Test that return values are coerced to arrays """
        source = np.zeros((2, 3), dt)
        ptr_type = ndpointer(shape=source.shape, dtype=source.dtype)
        c_forward_pointer.restype = ptr_type
        c_forward_pointer.argtypes = (ptr_type,)
        # check that the arrays are equivalent views on the same data
        round_tripped = c_forward_pointer(source)
        assert_equal(round_tripped.dtype, source.dtype)
        assert_equal(round_tripped.shape, source.shape)
        assert_equal(
            round_tripped.__array_interface__['data'],
            source.__array_interface__['data']
        )

    def test_vague_return_value(self):
        """ Test that vague ndpointer return values do not promote to arrays """
        source = np.zeros((2, 3))
        ptr_type = ndpointer(dtype=source.dtype)
        c_forward_pointer.restype = ptr_type
        c_forward_pointer.argtypes = (ptr_type,)
        returned = c_forward_pointer(source)
        assert_(isinstance(returned, ptr_type))
@pytest.mark.skipif(ctypes is None,
                    reason="ctypes not available on this python installation")
class TestAsArray:
    """Tests for numpy.ctypeslib.as_array."""

    def test_array(self):
        # ctypes array instances convert directly, including nested arrays.
        from ctypes import c_int

        pair_t = c_int * 2
        a = as_array(pair_t(1, 2))
        assert_equal(a.shape, (2,))
        assert_array_equal(a, np.array([1, 2]))
        a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
        assert_equal(a.shape, (3, 2))
        assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))

    def test_pointer(self):
        # A bare pointer carries no length, so a shape must be supplied;
        # any shape whose total size fits the data is acceptable.
        from ctypes import POINTER, c_int, cast

        p = cast((c_int * 10)(*range(10)), POINTER(c_int))

        a = as_array(p, shape=(10,))
        assert_equal(a.shape, (10,))
        assert_array_equal(a, np.arange(10))

        a = as_array(p, shape=(2, 5))
        assert_equal(a.shape, (2, 5))
        assert_array_equal(a, np.arange(10).reshape((2, 5)))

        # shape argument is required
        assert_raises(TypeError, as_array, p)

    @pytest.mark.skipif(
        sys.version_info[:2] == (3, 12),
        reason="Broken in 3.12.0rc1, see gh-24399",
    )
    def test_struct_array_pointer(self):
        # Pointers into nested struct arrays must expose the same
        # structured data no matter which nesting level they target.
        from ctypes import Structure, c_int16, pointer

        class Struct(Structure):
            _fields_ = [('a', c_int16)]

        Struct3 = 3 * Struct

        c_array = (2 * Struct3)(
            Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
            Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
        )

        expected = np.array([
            [(1,), (2,), (3,)],
            [(4,), (5,), (6,)],
        ], dtype=[('a', np.int16)])

        def check(x):
            assert_equal(x.dtype, expected.dtype)
            assert_equal(x, expected)

        # all of these should be equivalent
        check(as_array(c_array))
        check(as_array(pointer(c_array), shape=()))
        check(as_array(pointer(c_array[0]), shape=(2,)))
        check(as_array(pointer(c_array[0][0]), shape=(2, 3)))

    def test_reference_cycles(self):
        # related to gh-6511
        import ctypes

        # create array to work with
        # don't use int/long to avoid running into bpo-10746
        N = 100
        a = np.arange(N, dtype=np.short)

        # get pointer to array
        pnt = np.ctypeslib.as_ctypes(a)

        with np.testing.assert_no_gc_cycles():
            # decay the array above to a pointer to its first element
            newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short))
            # and construct an array using this data
            b = np.ctypeslib.as_array(newpnt, (N,))
            # now delete both, which should cleanup both objects
            del newpnt, b

    def test_segmentation_fault(self):
        # The ctypes view must keep the source array alive even after the
        # last direct reference is dropped; otherwise indexing it would
        # touch freed memory.
        arr = np.zeros((224, 224, 3))
        c_arr = np.ctypeslib.as_ctypes(arr)
        arr_ref = weakref.ref(arr)
        del arr

        # check the reference wasn't cleaned up
        assert_(arr_ref() is not None)

        # check we avoid the segfault
        c_arr[0][0][0]
@pytest.mark.skipif(ctypes is None,
                    reason="ctypes not available on this python installation")
class TestAsCtypesType:
    """ Test conversion from dtypes to ctypes types """

    def test_scalar(self):
        # Byte order of the dtype maps onto the ctypes endian variants.
        cases = [
            ('<u2', ctypes.c_uint16.__ctype_le__),
            ('>u2', ctypes.c_uint16.__ctype_be__),
            ('u2', ctypes.c_uint16),
        ]
        for code, expected_type in cases:
            converted = np.ctypeslib.as_ctypes_type(np.dtype(code))
            assert_equal(converted, expected_type)

    def test_subarray(self):
        # Subarray dtypes become nested ctypes array types.
        converted = np.ctypeslib.as_ctypes_type(np.dtype((np.int32, (2, 3))))
        assert_equal(converted, 2 * (3 * ctypes.c_int32))

    def test_structure(self):
        packed = np.dtype([
            ('a', np.uint16),
            ('b', np.uint32),
        ])
        converted = np.ctypeslib.as_ctypes_type(packed)
        assert_(issubclass(converted, ctypes.Structure))
        assert_equal(ctypes.sizeof(converted), packed.itemsize)
        assert_equal(converted._fields_, [
            ('a', ctypes.c_uint16),
            ('b', ctypes.c_uint32),
        ])

    def test_structure_aligned(self):
        # Alignment must be expressed with explicit padding fields.
        aligned = np.dtype([
            ('a', np.uint16),
            ('b', np.uint32),
        ], align=True)
        converted = np.ctypeslib.as_ctypes_type(aligned)
        assert_(issubclass(converted, ctypes.Structure))
        assert_equal(ctypes.sizeof(converted), aligned.itemsize)
        assert_equal(converted._fields_, [
            ('a', ctypes.c_uint16),
            ('', ctypes.c_char * 2),  # padding
            ('b', ctypes.c_uint32),
        ])

    def test_union(self):
        # Fields at the same offset translate to a ctypes Union.
        overlaid = np.dtype({
            'names': ['a', 'b'],
            'offsets': [0, 0],
            'formats': [np.uint16, np.uint32]
        })
        converted = np.ctypeslib.as_ctypes_type(overlaid)
        assert_(issubclass(converted, ctypes.Union))
        assert_equal(ctypes.sizeof(converted), overlaid.itemsize)
        assert_equal(converted._fields_, [
            ('a', ctypes.c_uint16),
            ('b', ctypes.c_uint32),
        ])

    def test_padded_union(self):
        # Trailing itemsize padding is preserved via a char-array field.
        padded = np.dtype({
            'names': ['a', 'b'],
            'offsets': [0, 0],
            'formats': [np.uint16, np.uint32],
            'itemsize': 5,
        })
        converted = np.ctypeslib.as_ctypes_type(padded)
        assert_(issubclass(converted, ctypes.Union))
        assert_equal(ctypes.sizeof(converted), padded.itemsize)
        assert_equal(converted._fields_, [
            ('a', ctypes.c_uint16),
            ('b', ctypes.c_uint32),
            ('', ctypes.c_char * 5),  # padding
        ])

    def test_overlapping(self):
        # Partially overlapping (non-union) fields cannot be represented.
        tangled = np.dtype({
            'names': ['a', 'b'],
            'offsets': [0, 2],
            'formats': [np.uint32, np.uint32]
        })
        assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, tangled)

View File

@@ -0,0 +1,38 @@
import sys
from importlib.util import LazyLoader, find_spec, module_from_spec
import pytest
# Warning raised by _reload_guard() in numpy/__init__.py
@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded")
def test_lazy_load():
    # gh-22045. lazyload doesn't import submodule names into the namespace
    # muck with sys.modules to test the importing system

    # Remove the already-imported numpy and all of its submodules so the
    # lazy import below starts from a clean slate; stash them for restore.
    old_numpy = sys.modules.pop("numpy")

    numpy_modules = {}
    for mod_name, mod in list(sys.modules.items()):
        if mod_name[:6] == "numpy.":
            numpy_modules[mod_name] = mod
            sys.modules.pop(mod_name)

    try:
        # create lazy load of numpy as np
        spec = find_spec("numpy")
        module = module_from_spec(spec)
        sys.modules["numpy"] = module
        loader = LazyLoader(spec.loader)
        loader.exec_module(module)
        np = module

        # test a subpackage import
        from numpy.lib import recfunctions  # noqa: F401

        # test triggering the import of the package
        np.ndarray
    finally:
        # Put the original modules back so later tests see the real numpy.
        if old_numpy:
            sys.modules["numpy"] = old_numpy
            sys.modules.update(numpy_modules)

View File

@@ -0,0 +1,59 @@
import numpy as np
import numpy.matlib
from numpy.testing import assert_, assert_array_equal
def test_empty():
    """matlib.empty must return a 2-D matrix, never a 1-D array."""
    x = numpy.matlib.empty((2,))
    assert_(isinstance(x, np.matrix))
    # Bug fix: the original `assert_(x.shape, (1, 2))` passed the expected
    # shape as the *message* argument of assert_, so it only checked that
    # x.shape was truthy and could never fail. Compare the shapes for real.
    assert x.shape == (1, 2)
def test_ones():
    """matlib.ones builds all-ones matrices from shape tuples or ints."""
    expected = np.matrix([[1., 1., 1.],
                          [1., 1., 1.]])
    assert_array_equal(numpy.matlib.ones((2, 3)), expected)
    # A scalar size still yields a 1-row matrix.
    assert_array_equal(numpy.matlib.ones(2), np.matrix([[1., 1.]]))
def test_zeros():
    """matlib.zeros builds all-zero matrices from shape tuples or ints."""
    expected = np.matrix([[0., 0., 0.],
                          [0., 0., 0.]])
    assert_array_equal(numpy.matlib.zeros((2, 3)), expected)
    # A scalar size still yields a 1-row matrix.
    assert_array_equal(numpy.matlib.zeros(2), np.matrix([[0., 0.]]))
def test_identity():
    """matlib.identity returns the n-by-n identity as a matrix."""
    result = numpy.matlib.identity(2, dtype=int)
    assert_array_equal(result, np.matrix([[1, 0], [0, 1]]))
def test_eye():
    """matlib.eye honours the diagonal offset and the memory order."""
    shifted = numpy.matlib.eye(3, k=1, dtype=int)
    assert_array_equal(shifted, np.matrix([[0, 1, 0],
                                           [0, 0, 1],
                                           [0, 0, 0]]))
    # Default layout is C order.
    assert shifted.flags.c_contiguous
    assert not shifted.flags.f_contiguous
    fortran = numpy.matlib.eye(3, 4, dtype=int, order='F')
    assert_array_equal(fortran, np.matrix([[1, 0, 0, 0],
                                           [0, 1, 0, 0],
                                           [0, 0, 1, 0]]))
    # order='F' must produce a Fortran-contiguous matrix.
    assert not fortran.flags.c_contiguous
    assert fortran.flags.f_contiguous
def test_rand():
    """matlib.rand returns a matrix (always 2-D) even for a scalar size."""
    sample = numpy.matlib.rand(3)
    # check matrix type, array would have shape (3,)
    assert_(sample.ndim == 2)
def test_randn():
    """matlib.randn returns a matrix (always 2-D) even for a scalar size."""
    # Consistency fix: reference the module as `numpy.matlib` like every
    # other test in this file. The original `np.matlib` only resolved
    # because `import numpy.matlib` happened to run at module import time.
    x = numpy.matlib.randn(3)
    # check matrix type, array would have shape (3,)
    assert_(x.ndim == 2)
def test_repmat():
    """matlib.repmat tiles the input into an m-by-n block layout."""
    base = np.arange(4)
    tiled = numpy.matlib.repmat(base, 2, 2)
    # 2x2 tiling of [0..3] -> two identical rows of the doubled sequence.
    row = [0, 1, 2, 3, 0, 1, 2, 3]
    assert_array_equal(tiled, np.array([row, row]))

View File

@@ -0,0 +1,46 @@
"""
Check the numpy config is valid.
"""
from unittest.mock import patch
import pytest
import numpy as np
# Every test in this module only applies to Meson-based builds, which are
# the ones that expose ``numpy.__config__._built_with_meson``.
pytestmark = pytest.mark.skipif(
    not hasattr(np.__config__, "_built_with_meson"),
    reason="Requires Meson builds",
)
class TestNumPyConfigs:
REQUIRED_CONFIG_KEYS = [
"Compilers",
"Machine Information",
"Python Information",
]
@patch("numpy.__config__._check_pyyaml")
def test_pyyaml_not_found(self, mock_yaml_importer):
mock_yaml_importer.side_effect = ModuleNotFoundError()
with pytest.warns(UserWarning):
np.show_config()
def test_dict_mode(self):
config = np.show_config(mode="dicts")
assert isinstance(config, dict)
assert all(key in config for key in self.REQUIRED_CONFIG_KEYS), (
"Required key missing,"
" see index of `False` with `REQUIRED_CONFIG_KEYS`"
)
def test_invalid_mode(self):
with pytest.raises(AttributeError):
np.show_config(mode="foo")
def test_warn_to_add_tests(self):
assert len(np.__config__.DisplayModes) == 2, (
"New mode detected,"
" please add UT if applicable and increment this count"
)

View File

@@ -0,0 +1,54 @@
"""
Check the numpy version is valid.
Note that a development version is marked by the presence of 'dev0' or '+'
in the version string, all else is treated as a release. The version string
itself is set from the output of ``git describe`` which relies on tags.
Examples
--------
Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2
Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0
Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a
Note that a release is determined by the version string, which in turn
is controlled by the result of the ``git describe`` command.
"""
import re
import numpy as np
from numpy.testing import assert_
def test_valid_numpy_version():
    # Verify that the numpy version is a valid one (no .post suffix or other
    # nonsense). See gh-6431 for an issue caused by an invalid version.
    release_re = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9])?"
    dev_re = r"(\.dev[0-9]+(\+git[0-9]+\.[0-9a-f]+)?)?"
    match = re.match(release_re + dev_re + '$', np.__version__)
    assert_(match is not None, np.__version__)
def test_short_version():
    # Check numpy.short_version actually exists and tracks __version__:
    # identical for releases, local-version part stripped for dev builds.
    expected = (np.__version__ if np.version.release
                else np.__version__.split("+")[0])
    label = "release" if np.version.release else "development"
    assert_(expected == np.version.short_version,
            f"short_version mismatch in {label} version")
def test_version_module():
contents = {s for s in dir(np.version) if not s.startswith('_')}
expected = {
'full_version',
'git_revision',
'release',
'short_version',
'version',
}
assert contents == expected

View File

@@ -0,0 +1,806 @@
import functools
import importlib
import inspect
import pkgutil
import subprocess
import sys
import sysconfig
import types
import warnings
import pytest
import numpy
import numpy as np
from numpy.testing import IS_WASM
# ctypes is optional on some builds; tests guard on it being None.
try:
    import ctypes
except ImportError:
    ctypes = None
def check_dir(module, module_name=None):
    """Returns a mapping of all objects with the wrong __module__ attribute."""
    if module_name is None:
        module_name = module.__name__
    mislabeled = {}
    for attr_name in dir(module):
        # "core" is skipped wholesale (see the shim tests further down).
        if attr_name == "core":
            continue
        value = getattr(module, attr_name)
        identifiable = (hasattr(value, '__module__')
                        and hasattr(value, '__name__'))
        if identifiable and value.__module__ != module_name:
            mislabeled[attr_name] = f"{value.__module__}.{value.__name__}"
    return mislabeled
def test_numpy_namespace():
    # We override dir to not show these members
    allowlist = {
        'recarray': 'numpy.rec.recarray',
    }
    # pytest gives better error messages with the builtin assert than with
    # assert_equal
    assert check_dir(np) == allowlist
@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
@pytest.mark.parametrize('name', ['testing'])
def test_import_lazy_import(name):
    """Make sure we can actually use the modules we lazy load.

    While not exported as part of the public API, it was accessible. With the
    use of __getattr__ and __dir__, this isn't always true It can happen that
    an infinite recursion may happen.

    This is the only way I found that would force the failure to appear on the
    badly implemented code.

    We also test for the presence of the lazily imported modules in dir
    """
    # A fresh interpreter must be able to touch the lazily-loaded attribute
    # without printing anything or blowing up.
    command = [sys.executable, '-c', "import numpy; numpy." + name]
    output = subprocess.check_output(command)
    assert not output

    # Make sure they are still in the __dir__
    assert name in dir(np)
def test_dir_testing():
    """Assert that output of dir has only one "testing/tester"
    attribute without duplicate"""
    listing = dir(np)
    assert len(listing) == len(set(listing))
def test_numpy_linalg():
    # numpy.linalg must not expose names with a foreign __module__.
    assert check_dir(np.linalg) == {}
def test_numpy_fft():
    # numpy.fft must not expose names with a foreign __module__.
    assert check_dir(np.fft) == {}
@pytest.mark.skipif(ctypes is None,
                    reason="ctypes not available in this python")
def test_NPY_NO_EXPORT():
    # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
    cdll = ctypes.CDLL(np._core._multiarray_tests.__file__)
    hidden = getattr(cdll, 'test_not_exported', None)
    assert hidden is None, ("'test_not_exported' is mistakenly exported, "
                            "NPY_NO_EXPORT does not work")
# Historically NumPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
#
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
# of underscores) but should not be used. For many of those modules the
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
# Submodules that are deliberately public, or contain public objects found
# nowhere else; treated as public by the tests below.
PUBLIC_MODULES = ['numpy.' + s for s in [
    "ctypeslib",
    "dtypes",
    "exceptions",
    "f2py",
    "fft",
    "lib",
    "lib.array_utils",
    "lib.format",
    "lib.introspect",
    "lib.mixins",
    "lib.npyio",
    "lib.recfunctions",  # note: still needs cleaning, was forgotten for 2.0
    "lib.scimath",
    "lib.stride_tricks",
    "linalg",
    "ma",
    "ma.extras",
    "ma.mrecords",
    "polynomial",
    "polynomial.chebyshev",
    "polynomial.hermite",
    "polynomial.hermite_e",
    "polynomial.laguerre",
    "polynomial.legendre",
    "polynomial.polynomial",
    "random",
    "strings",
    "testing",
    "testing.overrides",
    "typing",
    "typing.mypy_plugin",
    "version",
]]
# numpy.distutils is only shipped on Python versions below 3.12.
if sys.version_info < (3, 12):
    PUBLIC_MODULES += [
        'numpy.' + s for s in [
            "distutils",
            "distutils.cpuinfo",
            "distutils.exec_command",
            "distutils.misc_util",
            "distutils.log",
            "distutils.system_info",
        ]
    ]
# Public namespaces that are aliases of modules living elsewhere
# (e.g. numpy.char -> numpy.strings-era defchararray machinery).
PUBLIC_ALIASED_MODULES = [
    "numpy.char",
    "numpy.emath",
    "numpy.rec",
]
# Modules that look public (no leading underscore) but should not be used;
# kept importable for backwards compatibility.
PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
    "conftest",
    "core",
    "core.multiarray",
    "core.numeric",
    "core.umath",
    "core.arrayprint",
    "core.defchararray",
    "core.einsumfunc",
    "core.fromnumeric",
    "core.function_base",
    "core.getlimits",
    "core.numerictypes",
    "core.overrides",
    "core.records",
    "core.shape_base",
    "f2py.auxfuncs",
    "f2py.capi_maps",
    "f2py.cb_rules",
    "f2py.cfuncs",
    "f2py.common_rules",
    "f2py.crackfortran",
    "f2py.diagnose",
    "f2py.f2py2e",
    "f2py.f90mod_rules",
    "f2py.func2subr",
    "f2py.rules",
    "f2py.symbolic",
    "f2py.use_rules",
    "fft.helper",
    "lib.user_array",  # note: not in np.lib, but probably should just be deleted
    "linalg.lapack_lite",
    "linalg.linalg",
    "ma.core",
    "ma.testutils",
    "matlib",
    "matrixlib",
    "matrixlib.defmatrix",
    "polynomial.polyutils",
    "random.mtrand",
    "random.bit_generator",
    "testing.print_coercion_tables",
]]
# Same Python-version gate as above for the distutils-only entries.
if sys.version_info < (3, 12):
    PRIVATE_BUT_PRESENT_MODULES += [
        'numpy.' + s for s in [
            "distutils.armccompiler",
            "distutils.fujitsuccompiler",
            "distutils.ccompiler",
            'distutils.ccompiler_opt',
            "distutils.command",
            "distutils.command.autodist",
            "distutils.command.bdist_rpm",
            "distutils.command.build",
            "distutils.command.build_clib",
            "distutils.command.build_ext",
            "distutils.command.build_py",
            "distutils.command.build_scripts",
            "distutils.command.build_src",
            "distutils.command.config",
            "distutils.command.config_compiler",
            "distutils.command.develop",
            "distutils.command.egg_info",
            "distutils.command.install",
            "distutils.command.install_clib",
            "distutils.command.install_data",
            "distutils.command.install_headers",
            "distutils.command.sdist",
            "distutils.conv_template",
            "distutils.core",
            "distutils.extension",
            "distutils.fcompiler",
            "distutils.fcompiler.absoft",
            "distutils.fcompiler.arm",
            "distutils.fcompiler.compaq",
            "distutils.fcompiler.environment",
            "distutils.fcompiler.g95",
            "distutils.fcompiler.gnu",
            "distutils.fcompiler.hpux",
            "distutils.fcompiler.ibm",
            "distutils.fcompiler.intel",
            "distutils.fcompiler.lahey",
            "distutils.fcompiler.mips",
            "distutils.fcompiler.nag",
            "distutils.fcompiler.none",
            "distutils.fcompiler.pathf95",
            "distutils.fcompiler.pg",
            "distutils.fcompiler.nv",
            "distutils.fcompiler.sun",
            "distutils.fcompiler.vast",
            "distutils.fcompiler.fujitsu",
            "distutils.from_template",
            "distutils.intelccompiler",
            "distutils.lib2def",
            "distutils.line_endings",
            "distutils.mingw32ccompiler",
            "distutils.msvccompiler",
            "distutils.npy_pkg_config",
            "distutils.numpy_distribution",
            "distutils.pathccompiler",
            "distutils.unixccompiler",
        ]
    ]
def is_unexpected(name):
    """Check if this needs to be considered."""
    # Private names, test packages and setup files never need review.
    if '._' in name or '.tests' in name or '.setup' in name:
        return False
    # Anything already classified is expected.
    known = (name in PUBLIC_MODULES
             or name in PUBLIC_ALIASED_MODULES
             or name in PRIVATE_BUT_PRESENT_MODULES)
    return not known
# Module names to ignore during the pkgutil walk below; the only entry is
# part of numpy.distutils, which ships on Python < 3.12 only.
SKIP_LIST = (
    [] if sys.version_info >= (3, 12) else ["numpy.distutils.msvc9compiler"]
)
def test_all_modules_are_expected():
    """
    Test that we don't add anything that looks like a new public module by
    accident. Check is based on filenames.
    """
    walker = pkgutil.walk_packages(path=np.__path__,
                                   prefix=np.__name__ + '.',
                                   onerror=None)
    # A new name showing up here is either deliberate (then it belongs in
    # PUBLIC_MODULES) or should get a leading underscore;
    # PRIVATE_BUT_PRESENT_MODULES should not grow.
    unexpected = [modname for _, modname, _ in walker
                  if is_unexpected(modname) and modname not in SKIP_LIST]
    if unexpected:
        raise AssertionError(f'Found unexpected modules: {unexpected}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
SKIP_LIST_2 = [
    'numpy.lib.math',
    'numpy.matlib.char',
    'numpy.matlib.rec',
    'numpy.matlib.emath',
    'numpy.matlib.exceptions',
    'numpy.matlib.math',
    'numpy.matlib.linalg',
    'numpy.matlib.fft',
    'numpy.matlib.random',
    'numpy.matlib.ctypeslib',
    'numpy.matlib.ma',
]
# Plain imports inside numpy.distutils.log leak into its namespace; only
# present when numpy.distutils is shipped (Python < 3.12).
if sys.version_info < (3, 12):
    SKIP_LIST_2 += [
        'numpy.distutils.log.sys',
        'numpy.distutils.log.logging',
        'numpy.distutils.log.warnings',
    ]
def test_all_modules_are_expected_2():
    """
    Method checking all objects. The pkgutil-based method in
    `test_all_modules_are_expected` does not catch imports into a namespace,
    only filenames. So this test is more thorough, and checks this like:

        import .lib.scimath as emath

    To check if something in a module is (effectively) public, one can check if
    there's anything in that namespace that's a public function/object but is
    not exposed in a higher-level namespace. For example for a `numpy.lib`
    submodule::

        mod = np.lib.mixins
        for obj in mod.__all__:
            if obj in np.__all__:
                continue
            elif obj in np.lib.__all__:
                continue
            else:
                print(obj)
    """
    def find_unexpected_members(mod_name):
        # Inspect __all__ when defined, falling back to dir() otherwise.
        module = importlib.import_module(mod_name)
        try:
            objnames = module.__all__
        except AttributeError:
            objnames = dir(module)
        flagged = []
        for objname in objnames:
            if objname.startswith('_'):
                continue
            fullname = f"{mod_name}.{objname}"
            is_module = isinstance(getattr(module, objname), types.ModuleType)
            if (is_module and is_unexpected(fullname)
                    and fullname not in SKIP_LIST_2):
                flagged.append(fullname)
        return flagged

    unexpected_members = find_unexpected_members("numpy")
    for public_mod in PUBLIC_MODULES:
        unexpected_members += find_unexpected_members(public_mod)

    if unexpected_members:
        raise AssertionError("Found unexpected object(s) that look like "
                             f"modules: {unexpected_members}")
def test_api_importable():
    """
    Check that all submodules listed higher up in this file can be imported

    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
    simply need to be removed from the list (deprecation may or may not be
    needed - apply common sense).
    """
    def check_importable(module_name):
        try:
            importlib.import_module(module_name)
        except (ImportError, AttributeError):
            return False
        return True

    failures = [m for m in PUBLIC_MODULES if not check_importable(m)]
    if failures:
        raise AssertionError("Modules in the public API that cannot be "
                             f"imported: {failures}")

    # Aliased modules are reached as attribute chains, not real imports,
    # so resolve them with eval (on trusted, hard-coded names only).
    for alias in PUBLIC_ALIASED_MODULES:
        try:
            eval(alias)
        except AttributeError:
            failures.append(alias)
    if failures:
        raise AssertionError("Modules in the public API that were not "
                             f"found: {failures}")

    with warnings.catch_warnings(record=True):
        warnings.filterwarnings('always', category=DeprecationWarning)
        warnings.filterwarnings('always', category=ImportWarning)
        failures = [m for m in PRIVATE_BUT_PRESENT_MODULES
                    if not check_importable(m)]
    if failures:
        raise AssertionError("Modules that are not really public but looked "
                             "public and can not be imported: "
                             f"{failures}")
@pytest.mark.xfail(
sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"),
reason=(
"NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, "
"which does not expose the `array_api` entry point. "
"See https://github.com/numpy/numpy/pull/19800"
),
)
def test_array_api_entry_point():
"""
Entry point for Array API implementation can be found with importlib and
returns the main numpy namespace.
"""
# For a development install that did not go through meson-python,
# the entrypoint will not have been installed. So ensure this test fails
# only if numpy is inside site-packages.
numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__
eps = importlib.metadata.entry_points()
xp_eps = eps.select(group="array_api")
if len(xp_eps) == 0:
if numpy_in_sitepackages:
msg = "No entry points for 'array_api' found"
raise AssertionError(msg) from None
return
try:
ep = next(ep for ep in xp_eps if ep.name == "numpy")
except StopIteration:
if numpy_in_sitepackages:
msg = "'numpy' not in array_api entry points"
raise AssertionError(msg) from None
return
if ep.value == 'numpy.array_api':
# Looks like the entrypoint for the current numpy build isn't
# installed, but an older numpy is also installed and hence the
# entrypoint is pointing to the old (no longer existing) location.
# This isn't a problem except for when running tests with `spin` or an
# in-place build.
return
xp = ep.load()
msg = (
f"numpy entry point value '{ep.value}' "
"does not point to our Array API implementation"
)
assert xp is numpy, msg
def test_main_namespace_all_dir_coherence():
"""
Checks if `dir(np)` and `np.__all__` are consistent and return
the same content, excluding exceptions and private members.
"""
def _remove_private_members(member_set):
return {m for m in member_set if not m.startswith('_')}
def _remove_exceptions(member_set):
return member_set.difference({
"bool" # included only in __dir__
})
all_members = _remove_private_members(np.__all__)
all_members = _remove_exceptions(all_members)
dir_members = _remove_private_members(np.__dir__())
dir_members = _remove_exceptions(dir_members)
assert all_members == dir_members, (
"Members that break symmetry: "
f"{all_members.symmetric_difference(dir_members)}"
)
@pytest.mark.filterwarnings(
    r"ignore:numpy.core(\.\w+)? is deprecated:DeprecationWarning"
)
def test_core_shims_coherence():
    """
    Check that all "semi-public" members of `numpy._core` are also accessible
    from `numpy.core` shims.
    """
    import numpy.core as core

    def _is_real_submodule(obj):
        # Namespace packages (origin is None), e.g. numpy._core.include or
        # numpy._core.lib.pkgconfig, have no shim counterpart.
        return (inspect.ismodule(obj)
                and obj.__spec__ and obj.__spec__.origin is not None)

    for member_name in dir(np._core):
        # Skip private and test members. Also if a module is aliased,
        # no need to add it to np.core
        if (member_name.startswith("_")
                or member_name in ["tests", "strings"]
                or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES):
            continue

        member = getattr(np._core, member_name)
        if _is_real_submodule(member):
            # Every non-dunder attribute of the real submodule must be
            # importable from the matching numpy.core shim module.
            for attr_name in dir(member):
                if attr_name.startswith("__"):
                    continue
                shim = __import__(f"numpy.core.{member_name}",
                                  fromlist=[attr_name])
                assert getattr(member, attr_name) is getattr(shim, attr_name)
        else:
            assert member is getattr(core, member_name)
def test_functions_single_location():
    """
    Check that each public function is available from one location only.

    Performs a BFS traversal of NumPy's public submodules and flags any
    function-like object (plain function, ufunc, or array-function
    dispatcher) reachable from more than one public location, except for
    a small list of deliberate aliases.
    """
    from collections.abc import Callable
    from typing import Any
    from numpy._core._multiarray_umath import (
        _ArrayFunctionDispatcher as dispatched_function,
    )
    # Modules already seen, so each module is traversed at most once.
    visited_modules: set[types.ModuleType] = {np}
    visited_functions: set[Callable[..., Any]] = set()
    # Functions often have `__name__` overridden, therefore we need
    # to keep track of locations where functions have been found.
    functions_original_paths: dict[Callable[..., Any], str] = {}
    # Here we aggregate functions with more than one location.
    # It must be empty for the test to pass.
    duplicated_functions: list[tuple] = []
    modules_queue = [np]
    while len(modules_queue) > 0:
        module = modules_queue.pop()
        for member_name in dir(module):
            member = getattr(module, member_name)
            # first check if we got a module
            if (
                inspect.ismodule(member) and  # it's a module
                "numpy" in member.__name__ and  # inside NumPy
                not member_name.startswith("_") and  # not private
                "numpy._core" not in member.__name__ and  # outside _core
                # not a legacy or testing module
                member_name not in ["f2py", "ma", "testing", "tests"] and
                member not in visited_modules  # not visited yet
            ):
                modules_queue.append(member)
                visited_modules.add(member)
            # else check if we got a function-like object
            elif (
                inspect.isfunction(member) or
                isinstance(member, (dispatched_function, np.ufunc))
            ):
                if member in visited_functions:
                    # skip main namespace functions with aliases
                    if (
                        member.__name__ in [
                            "absolute",  # np.abs
                            "arccos",  # np.acos
                            "arccosh",  # np.acosh
                            "arcsin",  # np.asin
                            "arcsinh",  # np.asinh
                            "arctan",  # np.atan
                            "arctan2",  # np.atan2
                            "arctanh",  # np.atanh
                            "left_shift",  # np.bitwise_left_shift
                            "right_shift",  # np.bitwise_right_shift
                            "conjugate",  # np.conj
                            "invert",  # np.bitwise_not & np.bitwise_invert
                            "remainder",  # np.mod
                            "divide",  # np.true_divide
                            "concatenate",  # np.concat
                            "power",  # np.pow
                            "transpose",  # np.permute_dims
                        ] and
                        module.__name__ == "numpy"
                    ):
                        continue
                    # skip trimcoef from numpy.polynomial as it is
                    # duplicated by design.
                    if (
                        member.__name__ == "trimcoef" and
                        module.__name__.startswith("numpy.polynomial")
                    ):
                        continue
                    # skip ufuncs that are exported in np.strings as well
                    if member.__name__ in (
                        "add",
                        "equal",
                        "not_equal",
                        "greater",
                        "greater_equal",
                        "less",
                        "less_equal",
                    ) and module.__name__ == "numpy.strings":
                        continue
                    # numpy.char reexports all numpy.strings functions for
                    # backwards-compatibility
                    if module.__name__ == "numpy.char":
                        continue
                    # function is present in more than one location!
                    duplicated_functions.append(
                        (member.__name__,
                         module.__name__,
                         functions_original_paths[member])
                    )
                else:
                    # First sighting: remember where we found it.
                    visited_functions.add(member)
                    functions_original_paths[member] = module.__name__
    del visited_functions, visited_modules, functions_original_paths
    assert len(duplicated_functions) == 0, duplicated_functions
def test___module___attribute():
    """Check public objects advertise the module they are accessed from.

    BFS over NumPy's public submodules; every non-module member with a
    public ``__name__`` should have ``__module__`` equal to the module it
    was found in, except for a list of known re-exports.
    """
    modules_queue = [np]
    visited_modules = {np}
    visited_functions = set()
    # Members whose __module__ does not match their public location.
    incorrect_entries = []
    while len(modules_queue) > 0:
        module = modules_queue.pop()
        for member_name in dir(module):
            member = getattr(module, member_name)
            # first check if we got a module
            if (
                inspect.ismodule(member) and  # it's a module
                "numpy" in member.__name__ and  # inside NumPy
                not member_name.startswith("_") and  # not private
                "numpy._core" not in member.__name__ and  # outside _core
                # not in a skip module list
                member_name not in [
                    "char", "core", "f2py", "ma", "lapack_lite", "mrecords",
                    "testing", "tests", "polynomial", "typing", "mtrand",
                    "bit_generator",
                ] and
                member not in visited_modules  # not visited yet
            ):
                modules_queue.append(member)
                visited_modules.add(member)
            elif (
                not inspect.ismodule(member) and
                hasattr(member, "__name__") and
                not member.__name__.startswith("_") and
                member.__module__ != module.__name__ and
                member not in visited_functions
            ):
                # skip ufuncs that are exported in np.strings as well
                if member.__name__ in (
                    "add", "equal", "not_equal", "greater", "greater_equal",
                    "less", "less_equal",
                ) and module.__name__ == "numpy.strings":
                    continue
                # recarray and record are exported in np and np.rec
                if (
                    (member.__name__ == "recarray" and module.__name__ == "numpy") or
                    (member.__name__ == "record" and module.__name__ == "numpy.rec")
                ):
                    continue
                # ctypeslib exports ctypes c_long/c_longlong
                if (
                    member.__name__ in ("c_long", "c_longlong") and
                    module.__name__ == "numpy.ctypeslib"
                ):
                    continue
                # skip cdef classes (from numpy.random; presumably their
                # __module__ is set by Cython, not the public shim --
                # TODO confirm)
                if member.__name__ in (
                    "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM",
                    "Philox", "RandomState", "SFC64", "SeedSequence",
                ):
                    continue
                incorrect_entries.append(
                    {
                        "Func": member.__name__,
                        "actual": member.__module__,
                        "expected": module.__name__,
                    }
                )
                visited_functions.add(member)
    if incorrect_entries:
        assert len(incorrect_entries) == 0, incorrect_entries
def _check_correct_qualname_and_module(obj) -> bool:
    """Return True if *obj* is discoverable via its advertised location.

    Resolves ``obj.__module__`` plus ``obj.__qualname__`` back to an
    object and checks that it is *obj* itself, or a descriptor that
    produces *obj* (a bound method/property with matching attributes).
    """
    qualname_parts = obj.__qualname__.split(".")
    assert obj.__name__ == qualname_parts[-1]
    resolved = sys.modules[obj.__module__]
    for part in qualname_parts:
        resolved = getattr(resolved, part)
    if resolved is obj:
        return True
    # `obj` may be a bound method/property of `resolved`:
    return (
        hasattr(resolved, "__get__")
        and hasattr(obj, "__self__")
        and resolved.__module__ == obj.__module__
        and resolved.__qualname__ == obj.__qualname__
    )
def test___qualname___and___module___attribute():
    # NumPy messes with module and name/qualname attributes, but any object
    # should be discoverable based on its module and qualname, so test that.
    # We do this for anything with a name (ensuring qualname is also set).
    modules_queue = [np]
    visited_modules = {np}
    visited_functions = set()
    # Entries whose advertised module:qualname does not resolve back to
    # the member itself.
    incorrect_entries = []
    while len(modules_queue) > 0:
        module = modules_queue.pop()
        for member_name in dir(module):
            member = getattr(module, member_name)
            # first check if we got a module
            if (
                inspect.ismodule(member) and  # it's a module
                "numpy" in member.__name__ and  # inside NumPy
                not member_name.startswith("_") and  # not private
                member_name not in {"tests", "typing"} and  # 2024-12: type names don't match
                "numpy._core" not in member.__name__ and  # outside _core
                member not in visited_modules  # not visited yet
            ):
                modules_queue.append(member)
                visited_modules.add(member)
            elif (
                not inspect.ismodule(member) and
                hasattr(member, "__name__") and
                not member.__name__.startswith("_") and
                not member_name.startswith("_") and
                not _check_correct_qualname_and_module(member) and
                member not in visited_functions
            ):
                incorrect_entries.append(
                    {
                        "found_at": f"{module.__name__}:{member_name}",
                        "advertises": f"{member.__module__}:{member.__qualname__}",
                    }
                )
                visited_functions.add(member)
    if incorrect_entries:
        assert len(incorrect_entries) == 0, incorrect_entries

View File

@ -0,0 +1,74 @@
import pickle
import subprocess
import sys
import textwrap
from importlib import reload
import pytest
import numpy.exceptions as ex
from numpy.testing import (
IS_WASM,
assert_,
assert_equal,
assert_raises,
assert_warns,
)
def test_numpy_reloading():
    # gh-7844. Also check that relevant globals retain their identity.
    import numpy as np
    import numpy._globals
    _NoValue = np._NoValue
    VisibleDeprecationWarning = ex.VisibleDeprecationWarning
    ModuleDeprecationWarning = ex.ModuleDeprecationWarning
    # Reloading numpy emits a UserWarning, but the sentinel/warning
    # singletons must keep their identity across the reload.
    with assert_warns(UserWarning):
        reload(np)
    assert_(_NoValue is np._NoValue)
    assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning)
    assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning)
    # numpy._globals refuses to be reloaded (RuntimeError); presumably to
    # protect identity of its singletons -- see numpy._globals itself.
    assert_raises(RuntimeError, reload, numpy._globals)
    # A second reload must behave exactly like the first.
    with assert_warns(UserWarning):
        reload(np)
    assert_(_NoValue is np._NoValue)
    assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning)
    assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning)
def test_novalue():
    """Check ``np._NoValue``'s repr and that unpickling yields the singleton."""
    import numpy as np
    # The repr does not depend on the pickle protocol, so check it once
    # instead of on every loop iteration (it used to sit inside the loop).
    assert_equal(repr(np._NoValue), '<no value>')
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        # A pickle round-trip must return the singleton itself, not a copy,
        # since callers compare against it with `is`.
        assert_(pickle.loads(pickle.dumps(np._NoValue,
                                          protocol=proto)) is np._NoValue)
@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
def test_full_reimport():
    """At the time of writing this, it is *not* truly supported, but
    apparently enough users rely on it, for it to be an annoying change
    when it started failing previously.
    """
    # Test within a new process, to ensure that we do not mess with the
    # global state during the test run (could lead to cryptic test failures).
    # This is generally unsafe, especially, since we also reload the C-modules.
    code = textwrap.dedent(r"""
        import sys
        from pytest import warns
        import numpy as np
        for k in list(sys.modules.keys()):
            if "numpy" in k:
                del sys.modules[k]
        with warns(UserWarning):
            import numpy as np
        """)
    # text=True makes stderr a str directly, so the failure message below
    # needs no explicit .decode() (which could itself fail on odd bytes).
    p = subprocess.run([sys.executable, '-c', code],
                       capture_output=True, text=True)
    if p.returncode:
        raise AssertionError(
            f"Non-zero return code: {p.returncode!r}\n\n{p.stderr}"
        )

View File

@ -0,0 +1,49 @@
""" Test scripts
Test that we can run executable scripts that have been installed with numpy.
"""
import os
import subprocess
import sys
from os.path import dirname, isfile
from os.path import join as pathjoin
import pytest
import numpy as np
from numpy.testing import IS_WASM, assert_equal
# True when numpy is run from an in-place source-tree build (a setup.py
# sits next to the package), in which case installed scripts are absent.
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
def find_f2py_commands():
    """Return the f2py command names/paths expected on this platform."""
    if sys.platform == 'win32':
        # On Windows the script lives in the interpreter's Scripts folder;
        # in a virtualenv the interpreter already sits inside Scripts.
        exe_dir = dirname(sys.executable)
        if exe_dir.endswith('Scripts'):  # virtualenv
            scripts_dir = exe_dir
        else:
            scripts_dir = os.path.join(exe_dir, "Scripts")
        return [os.path.join(scripts_dir, 'f2py')]
    # Three scripts are installed in Unix-like systems:
    # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
    # if installed with python3.9 the scripts would be named
    # 'f2py', 'f2py3', and 'f2py3.9'.
    major = str(sys.version_info.major)
    minor = str(sys.version_info.minor)
    return ['f2py', f'f2py{major}', f'f2py{major}.{minor}']
@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
@pytest.mark.xfail(reason="Test is unreliable")
@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
def test_f2py(f2py_cmd):
    # test that we can run f2py script
    # `f2py -v` prints the numpy version the script was installed with;
    # it must match the version of the package under test.
    stdout = subprocess.check_output([f2py_cmd, '-v'])
    assert_equal(stdout.strip(), np.__version__.encode('ascii'))
@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
def test_pep338():
    """Check f2py is runnable as ``python -m numpy.f2py`` (PEP 338)."""
    output = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
    expected = np.__version__.encode('ascii')
    assert_equal(output.strip(), expected)

View File

@ -0,0 +1,78 @@
"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all.
"""
import ast
import tokenize
from pathlib import Path
import pytest
import numpy
class ParseCall(ast.NodeVisitor):
    """Collect the dotted-name parts of a call target, leftmost first.

    Visiting ``a.b.c`` leaves ``self.ls == ['a', 'b', 'c']``.
    """

    def __init__(self):
        # Accumulated name/attribute components in left-to-right order.
        self.ls = []

    def visit_Attribute(self, node):
        # Recurse into the value first so parts append left-to-right.
        self.generic_visit(node)
        self.ls.append(node.attr)

    def visit_Name(self, node):
        self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
    """AST visitor that flags ``warnings`` calls violating test policy.

    Raises AssertionError for ``simplefilter``/``filterwarnings`` calls
    passing "ignore", and for ``warnings.warn`` calls without a
    ``stacklevel`` (positionally or by keyword).
    """

    def __init__(self, filename):
        super().__init__()
        # May be a str or (as passed by test_warning_calls) a pathlib.Path.
        self.__filename = filename

    def visit_Call(self, node):
        p = ParseCall()
        p.visit(node.func)
        ast.NodeVisitor.generic_visit(self, node)

        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
            # Blanket "ignore" filters hide real problems in user code.
            if node.args[0].value == "ignore":
                raise AssertionError(
                    "warnings should have an appropriate stacklevel; "
                    f"found in {self.__filename} on line {node.lineno}")

        if p.ls[-1] == 'warn' and (
                len(p.ls) == 1 or p.ls[-2] == 'warnings'):

            # BUG FIX: this used to be a plain string equality against
            # self.__filename, which is a pathlib.Path when driven by
            # test_warning_calls, so the self-exclusion never matched.
            # Normalize to a POSIX-style string and match on the suffix.
            if Path(self.__filename).as_posix().endswith(
                    "testing/tests/test_warnings.py"):
                # This file deliberately contains bare `warn` examples.
                return

            # See if stacklevel exists:
            if len(node.args) == 3:
                return
            args = {kw.arg for kw in node.keywords}
            if "stacklevel" in args:
                return
            raise AssertionError(
                "warnings should have an appropriate stacklevel; "
                f"found in {self.__filename} on line {node.lineno}")
@pytest.mark.slow
def test_warning_calls():
    # combined "ignore" and stacklevel error
    """Scan the installed numpy sources for warning-call hygiene.

    Parses every numpy ``*.py`` file and lets ``FindFuncs`` raise if a
    ``warnings.warn`` call lacks a stacklevel or a blanket "ignore"
    filter is installed.
    """
    base = Path(numpy.__file__).parent
    for path in base.rglob("*.py"):
        # numpy.testing and these top-level files manipulate warnings
        # deliberately, so they are excluded from the scan.
        if base / "testing" in path.parents:
            continue
        if path == base / "__init__.py":
            continue
        if path == base / "random" / "__init__.py":
            continue
        if path == base / "conftest.py":
            continue
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g. LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read())
            FindFuncs(path).visit(tree)